From cea4a90ee4ad6234fee9c93eb75f67f0f4434bd9 Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Sat, 8 Aug 2020 20:38:44 +0100 Subject: [PATCH 01/10] Initial support for semantic tokens --- ycmd/completers/completer.py | 4 + .../language_server_completer.py | 82 ++++++++++++++++ .../language_server_protocol.py | 93 +++++++++++++++---- ycmd/handlers.py | 27 ++++++ ycmd/responses.py | 8 ++ 5 files changed, 196 insertions(+), 18 deletions(-) diff --git a/ycmd/completers/completer.py b/ycmd/completers/completer.py index 83d3d6d5b0..085d3ee125 100644 --- a/ycmd/completers/completer.py +++ b/ycmd/completers/completer.py @@ -369,6 +369,10 @@ def ComputeSignaturesInner( self, request_data ): return {} + def ComputeSemanticTokens( self, request_data ): + return {} + + def DefinedSubcommands( self ): subcommands = sorted( self.GetSubcommandsMap().keys() ) try: diff --git a/ycmd/completers/language_server/language_server_completer.py b/ycmd/completers/language_server/language_server_completer.py index be9acb680f..4f91e7b94d 100644 --- a/ycmd/completers/language_server/language_server_completer.py +++ b/ycmd/completers/language_server/language_server_completer.py @@ -1496,6 +1496,7 @@ def SignatureHelpAvailable( self ): else: return responses.SignatureHelpAvailalability.NOT_AVAILABLE + def ComputeSignaturesInner( self, request_data ): if not self.ServerIsReady(): return {} @@ -1536,6 +1537,87 @@ def ComputeSignaturesInner( self, request_data ): return result + def ComputeSemanticTokens( self, request_data ): + server_config = self._server_capabilities.get( 'semanticTokensProvider' ) + if server_config is None: + return {} + + class Atlas: + def __init__( self, legend ): + self.tokenTypes = legend[ 'tokenTypes' ] + self.tokenModifiers = legend[ 'tokenModifiers' ] + + atlas = Atlas( server_config[ 'legend' ] ) + + server_full_support = server_config.get( 'full' ) + if server_full_support == {}: + server_full_support = True + + if not server_full_support: + return {} + + 
request_id = self.GetConnection().NextRequestId() + response = self._connection.GetResponse( + request_id, + lsp.SemanticTokens( request_id, request_data ), + REQUEST_TIMEOUT_COMPLETION ) + + if response is None: + return {} + + token_data = ( response.get( 'result' ) or {} ).get( 'data' ) or [] + assert len( token_data ) % 5 == 0 + + class Token: + line = 0 + start_character = 0 + num_characters = 0 + token_type = 0 + token_modifiers = 0 + + tokens = [] + last_token = Token() + filename = request_data[ 'filepath' ] + contents = GetFileLines( request_data, filename ) + + for token_index in range( 0, len( token_data ), 5 ): + token = Token() + + token.line = last_token.line + token_data[ token_index ] + + token.start_character = token_data[ token_index + 1 ] + if token.line == last_token.line: + token.start_character += last_token.start_character + + token.num_characters = token_data[ token_index + 2 ] + + token.token_type = token_data[ token_index + 3 ] + token.token_modifiers = token_data[ token_index + 4 ] + + tokens.append( { + 'range': responses.BuildRangeData( _BuildRange( + contents, + filename, + { + 'start': { + 'line': token.line, + 'character': token.start_character, + }, + 'end': { + 'line': token.line, + 'character': token.start_character + token.num_characters, + } + } + ) ), + 'type': atlas.tokenTypes[ token.token_type ], + 'modifiers': [] # TODO: bits represent indexes in atlas + } ) + + last_token = token + + return { 'tokens': tokens } + + def GetDetailedDiagnostic( self, request_data ): self._UpdateServerWithFileContents( request_data ) diff --git a/ycmd/completers/language_server/language_server_protocol.py b/ycmd/completers/language_server/language_server_protocol.py index fd7bbffe46..ac8834b353 100644 --- a/ycmd/completers/language_server/language_server_protocol.py +++ b/ycmd/completers/language_server/language_server_protocol.py @@ -137,6 +137,32 @@ class Errors: 'TypeParameter', ] +TOKEN_TYPES = [ + 'namespace', + 'type', + 'class', + 
'enum', + 'interface', + 'struct', + 'typeParameter', + 'parameter', + 'variable', + 'property', + 'enumMember', + 'event', + 'function', + 'member', + 'macro', + 'keyword', + 'modifier', + 'comment', + 'string', + 'number', + 'regexp', + 'operator', +] + +TOKEN_MODIFIERS = [] class InvalidUriException( Exception ): """Raised when trying to convert a server URI to a file path but the scheme @@ -327,6 +353,17 @@ def Initialize( request_id, project_directory, extra_capabilities, settings ): 'markdown' ], }, + 'semanticTokens': { + 'requests': { + 'range': False, + 'full': { + 'delta': False + } + }, + 'tokenTypes': TOKEN_TYPES, + 'tokenModifiers': TOKEN_MODIFIERS, + 'tokenFormats': [ 'relative' ] + } }, 'synchronization': { 'didSave': True @@ -445,9 +482,7 @@ def DidCloseTextDocument( file_state ): def Completion( request_id, request_data, codepoint ): return BuildRequest( request_id, 'textDocument/completion', { - 'textDocument': { - 'uri': FilePathToUri( request_data[ 'filepath' ] ), - }, + 'textDocument': TextDocumentIdentifier( request_data ), 'position': Position( request_data[ 'line_num' ], request_data[ 'line_value' ], codepoint ), @@ -497,9 +532,7 @@ def Implementation( request_id, request_data ): def CodeAction( request_id, request_data, best_match_range, diagnostics ): return BuildRequest( request_id, 'textDocument/codeAction', { - 'textDocument': { - 'uri': FilePathToUri( request_data[ 'filepath' ] ), - }, + 'textDocument': TextDocumentIdentifier( request_data ), 'range': best_match_range, 'context': { 'diagnostics': diagnostics, @@ -509,9 +542,7 @@ def CodeAction( request_id, request_data, best_match_range, diagnostics ): def Rename( request_id, request_data, new_name ): return BuildRequest( request_id, 'textDocument/rename', { - 'textDocument': { - 'uri': FilePathToUri( request_data[ 'filepath' ] ), - }, + 'textDocument': TextDocumentIdentifier( request_data ), 'newName': new_name, 'position': Position( request_data[ 'line_num' ], request_data[ 
'line_value' ], @@ -533,11 +564,15 @@ def DocumentSymbol( request_id, request_data ): } ) +def TextDocumentIdentifier( request_data ): + return { + 'uri': FilePathToUri( request_data[ 'filepath' ] ), + } + + def BuildTextDocumentPositionParams( request_data ): return { - 'textDocument': { - 'uri': FilePathToUri( request_data[ 'filepath' ] ), - }, + 'textDocument': TextDocumentIdentifier( request_data ), 'position': Position( request_data[ 'line_num' ], request_data[ 'line_value' ], request_data[ 'column_codepoint' ] ) @@ -577,18 +612,14 @@ def CallHierarchy( request_id, direction, item ): def Formatting( request_id, request_data ): return BuildRequest( request_id, 'textDocument/formatting', { - 'textDocument': { - 'uri': FilePathToUri( request_data[ 'filepath' ] ), - }, + 'textDocument': TextDocumentIdentifier( request_data ), 'options': FormattingOptions( request_data ) } ) def RangeFormatting( request_id, request_data ): return BuildRequest( request_id, 'textDocument/rangeFormatting', { - 'textDocument': { - 'uri': FilePathToUri( request_data[ 'filepath' ] ), - }, + 'textDocument': TextDocumentIdentifier( request_data ), 'range': Range( request_data ), 'options': FormattingOptions( request_data ) } ) @@ -652,6 +683,32 @@ def ExecuteCommand( request_id, command, arguments ): } ) +def SemanticTokens( request_id, request_data ): + if 'range' in request_data: + return BuildRequest( request_id, 'textDocument/semanticTokens/range', { + 'textDocument': TextDocumentIdentifier( request_data ), + 'range': Range( request_data ) + } ) + else: + return BuildRequest( request_id, 'textDocument/semanticTokens/full', { + 'textDocument': TextDocumentIdentifier( request_data ), + } ) + + +def SemanticTokensDelta( request_id, previous_result_id, request_data ): + if 'range' in request_data: + raise ValueError( "LSP does not support range deltas" ) + + return BuildRequest( + request_id, + 'textDocument/semanticTokens/range/delta', + { + 'textDocument': TextDocumentIdentifier( 
request_data ), + 'previousResultId': previous_result_id + } + ) + + def FilePathToUri( file_name ): return urljoin( 'file:', pathname2url( file_name ) ) diff --git a/ycmd/handlers.py b/ycmd/handlers.py index b7c43c32ad..83708b5a18 100644 --- a/ycmd/handlers.py +++ b/ycmd/handlers.py @@ -30,6 +30,7 @@ BuildResolveCompletionResponse, BuildSignatureHelpResponse, BuildSignatureHelpAvailableResponse, + BuildSemanticTokensResponse, SignatureHelpAvailalability, UnknownExtraConf ) from ycmd.request_wrap import RequestWrap @@ -178,6 +179,32 @@ def GetSignatureHelp(): BuildSignatureHelpResponse( signature_info, errors = errors ) ) +@app.post( '/semantic_tokens' ) +def GetSemanticTokens(): + LOGGER.info( 'Received semantic tokens request' ) + request_data = RequestWrap( request.json ) + + if not _server_state.FiletypeCompletionUsable( request_data[ 'filetypes' ], + silent = True ): + return _JsonResponse( BuildSemanticTokensResponse( None ) ) + + errors = None + semantic_tokens = None + + try: + filetype_completer = _server_state.GetFiletypeCompleter( + request_data[ 'filetypes' ] ) + semantic_tokens = filetype_completer.ComputeSemanticTokens( request_data ) + except Exception as exception: + LOGGER.exception( 'Exception from semantic completer during sig help' ) + errors = [ BuildExceptionResponse( exception, traceback.format_exc() ) ] + + # No fallback for signature help. The general completer is unlikely to be able + # to offer anything of use for that here. 
+ return _JsonResponse( + BuildSemanticTokensResponse( semantic_tokens, errors = errors ) ) + + @app.post( '/filter_and_sort_candidates' ) def FilterAndSortCandidates(): # Not using RequestWrap because no need and the requests coming in aren't like diff --git a/ycmd/responses.py b/ycmd/responses.py index 555bdc8ef9..a5f6080db5 100644 --- a/ycmd/responses.py +++ b/ycmd/responses.py @@ -152,6 +152,14 @@ def BuildSignatureHelpResponse( signature_info, errors = None ): } +def BuildSemanticTokensResponse( semantic_tokens, errors = None ): + return { + 'semantic_tokens': + semantic_tokens if semantic_tokens else {}, + 'errors': errors if errors else [], + } + + # location.column_number_ is a byte offset def BuildLocationData( location ): return { From 03baff8cd76bb902332aaf3a6a2c3a515259589a Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Sat, 8 Aug 2020 22:53:37 +0100 Subject: [PATCH 02/10] The delta format is impossible to use, so don't even try; wait for initialisation and update files in semantic tokens request --- .../language_server_completer.py | 136 ++++++++++-------- .../language_server_protocol.py | 16 +-- 2 files changed, 81 insertions(+), 71 deletions(-) diff --git a/ycmd/completers/language_server/language_server_completer.py b/ycmd/completers/language_server/language_server_completer.py index 4f91e7b94d..a9f4bbecbd 100644 --- a/ycmd/completers/language_server/language_server_completer.py +++ b/ycmd/completers/language_server/language_server_completer.py @@ -1538,16 +1538,21 @@ def ComputeSignaturesInner( self, request_data ): def ComputeSemanticTokens( self, request_data ): + if not self._initialize_event.wait( REQUEST_TIMEOUT_COMPLETION ): + return {} + + if not self._ServerIsInitialized(): + return {} + + # FIXME: This all happens at the same time as OnFileReadyToParse, so this is + # all duplicated work + self._UpdateServerWithFileContents( request_data ) + server_config = self._server_capabilities.get( 'semanticTokensProvider' ) if server_config is 
None: return {} - class Atlas: - def __init__( self, legend ): - self.tokenTypes = legend[ 'tokenTypes' ] - self.tokenModifiers = legend[ 'tokenModifiers' ] - - atlas = Atlas( server_config[ 'legend' ] ) + atlas = TokenAtlas( server_config[ 'legend' ] ) server_full_support = server_config.get( 'full' ) if server_full_support == {}: @@ -1559,63 +1564,25 @@ def __init__( self, legend ): request_id = self.GetConnection().NextRequestId() response = self._connection.GetResponse( request_id, - lsp.SemanticTokens( request_id, request_data ), + lsp.SemanticTokens( + request_id, + request_data ), REQUEST_TIMEOUT_COMPLETION ) if response is None: return {} - token_data = ( response.get( 'result' ) or {} ).get( 'data' ) or [] - assert len( token_data ) % 5 == 0 - - class Token: - line = 0 - start_character = 0 - num_characters = 0 - token_type = 0 - token_modifiers = 0 - - tokens = [] - last_token = Token() filename = request_data[ 'filepath' ] contents = GetFileLines( request_data, filename ) - - for token_index in range( 0, len( token_data ), 5 ): - token = Token() - - token.line = last_token.line + token_data[ token_index ] - - token.start_character = token_data[ token_index + 1 ] - if token.line == last_token.line: - token.start_character += last_token.start_character - - token.num_characters = token_data[ token_index + 2 ] - - token.token_type = token_data[ token_index + 3 ] - token.token_modifiers = token_data[ token_index + 4 ] - - tokens.append( { - 'range': responses.BuildRangeData( _BuildRange( - contents, - filename, - { - 'start': { - 'line': token.line, - 'character': token.start_character, - }, - 'end': { - 'line': token.line, - 'character': token.start_character + token.num_characters, - } - } - ) ), - 'type': atlas.tokenTypes[ token.token_type ], - 'modifiers': [] # TODO: bits represent indexes in atlas - } ) - - last_token = token - - return { 'tokens': tokens } + result = response.get( 'result' ) or {} + tokens = _DecodeSemanticTokens( atlas, + result.get( 
'data' ) or [], + filename, + contents ) + + return { + 'tokens': tokens + } def GetDetailedDiagnostic( self, request_data ): @@ -3414,3 +3381,60 @@ def on_deleted( self, event ): with self._server._server_info_mutex: msg = lsp.DidChangeWatchedFiles( event.src_path, 'delete' ) self._server.GetConnection().SendNotification( msg ) + + +class TokenAtlas: + def __init__( self, legend ): + self.tokenTypes = legend[ 'tokenTypes' ] + self.tokenModifiers = legend[ 'tokenModifiers' ] + + +def _DecodeSemanticTokens( atlas, token_data, filename, contents ): + assert len( token_data ) % 5 == 0 + + class Token: + line = 0 + start_character = 0 + num_characters = 0 + token_type = 0 + token_modifiers = 0 + + last_token = Token() + tokens = [] + + for token_index in range( 0, len( token_data ), 5 ): + token = Token() + + token.line = last_token.line + token_data[ token_index ] + + token.start_character = token_data[ token_index + 1 ] + if token.line == last_token.line: + token.start_character += last_token.start_character + + token.num_characters = token_data[ token_index + 2 ] + + token.token_type = token_data[ token_index + 3 ] + token.token_modifiers = token_data[ token_index + 4 ] + + tokens.append( { + 'range': responses.BuildRangeData( _BuildRange( + contents, + filename, + { + 'start': { + 'line': token.line, + 'character': token.start_character, + }, + 'end': { + 'line': token.line, + 'character': token.start_character + token.num_characters, + } + } + ) ), + 'type': atlas.tokenTypes[ token.token_type ], + 'modifiers': [] # TODO: bits represent indexes in atlas + } ) + + last_token = token + + return tokens diff --git a/ycmd/completers/language_server/language_server_protocol.py b/ycmd/completers/language_server/language_server_protocol.py index ac8834b353..b9e2a059cd 100644 --- a/ycmd/completers/language_server/language_server_protocol.py +++ b/ycmd/completers/language_server/language_server_protocol.py @@ -683,7 +683,7 @@ def ExecuteCommand( request_id, command, 
arguments ): } ) -def SemanticTokens( request_id, request_data ): +def SemanticTokens( request_id, request_data, previous_result_id = None ): if 'range' in request_data: return BuildRequest( request_id, 'textDocument/semanticTokens/range', { 'textDocument': TextDocumentIdentifier( request_data ), @@ -695,20 +695,6 @@ def SemanticTokens( request_id, request_data ): } ) -def SemanticTokensDelta( request_id, previous_result_id, request_data ): - if 'range' in request_data: - raise ValueError( "LSP does not support range deltas" ) - - return BuildRequest( - request_id, - 'textDocument/semanticTokens/range/delta', - { - 'textDocument': TextDocumentIdentifier( request_data ), - 'previousResultId': previous_result_id - } - ) - - def FilePathToUri( file_name ): return urljoin( 'file:', pathname2url( file_name ) ) From 7d1da9edd03ba8c870354f48c8b397f5ba737633 Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Sun, 9 Aug 2020 00:53:50 +0100 Subject: [PATCH 03/10] Try and minimise file operation churn --- .../language_server_completer.py | 68 +++++++++++-------- 1 file changed, 40 insertions(+), 28 deletions(-) diff --git a/ycmd/completers/language_server/language_server_completer.py b/ycmd/completers/language_server/language_server_completer.py index a9f4bbecbd..4a26b2052e 100644 --- a/ycmd/completers/language_server/language_server_completer.py +++ b/ycmd/completers/language_server/language_server_completer.py @@ -1279,7 +1279,7 @@ def ComputeCandidatesInner( self, request_data, codepoint ): if not self._is_completion_provider: return None, False - self._UpdateServerWithFileContents( request_data ) + self._UpdateServerWithCurrentFileContents( request_data ) request_id = self.GetConnection().NextRequestId() @@ -1504,11 +1504,10 @@ def ComputeSignaturesInner( self, request_data ): if not self._server_capabilities.get( 'signatureHelpProvider' ): return {} - self._UpdateServerWithFileContents( request_data ) + self._UpdateServerWithCurrentFileContents( request_data ) request_id = 
self.GetConnection().NextRequestId() msg = lsp.SignatureHelp( request_id, request_data ) - response = self.GetConnection().GetResponse( request_id, msg, REQUEST_TIMEOUT_COMPLETION ) @@ -1544,9 +1543,7 @@ def ComputeSemanticTokens( self, request_data ): if not self._ServerIsInitialized(): return {} - # FIXME: This all happens at the same time as OnFileReadyToParse, so this is - # all duplicated work - self._UpdateServerWithFileContents( request_data ) + self._UpdateServerWithCurrentFileContents( request_data ) server_config = self._server_capabilities.get( 'semanticTokensProvider' ) if server_config is None: @@ -2040,6 +2037,14 @@ def _AnySupportedFileType( self, file_types ): return False + def _UpdateServerWithCurrentFileContents( self, request_data ): + file_name = request_data[ 'filepath' ] + contents = GetFileContents( request_data, file_name ) + filetypes = request_data[ 'filetypes' ] + with self._server_info_mutex: + self._RefreshFileContentsUnderLock( file_name, contents, filetypes ) + + def _UpdateServerWithFileContents( self, request_data ): """Update the server with the current contents of all open buffers, and close any buffers no longer open. 
@@ -2052,6 +2057,32 @@ def _UpdateServerWithFileContents( self, request_data ): self._PurgeMissingFilesUnderLock( files_to_purge ) + def _RefreshFileContentsUnderLock( self, file_name, contents, file_types ): + file_state = self._server_file_state[ file_name ] + action = file_state.GetDirtyFileAction( contents ) + + LOGGER.debug( 'Refreshing file %s: State is %s/action %s', + file_name, + file_state.state, + action ) + + if action == lsp.ServerFileState.OPEN_FILE: + msg = lsp.DidOpenTextDocument( file_state, + file_types, + contents ) + + self.GetConnection().SendNotification( msg ) + elif action == lsp.ServerFileState.CHANGE_FILE: + # FIXME: DidChangeTextDocument doesn't actually do anything + # different from DidOpenTextDocument other than send the right + # message, because we don't actually have a mechanism for generating + # the diffs. This isn't strictly necessary, but might lead to + # performance problems. + msg = lsp.DidChangeTextDocument( file_state, contents ) + + self.GetConnection().SendNotification( msg ) + + def _UpdateDirtyFilesUnderLock( self, request_data ): for file_name, file_data in request_data[ 'file_data' ].items(): if not self._AnySupportedFileType( file_data[ 'filetypes' ] ): @@ -2062,29 +2093,10 @@ def _UpdateDirtyFilesUnderLock( self, request_data ): self.SupportedFiletypes() ) continue - file_state = self._server_file_state[ file_name ] - action = file_state.GetDirtyFileAction( file_data[ 'contents' ] ) + self._RefreshFileContentsUnderLock( file_name, + file_data[ 'contents' ], + file_data[ 'filetypes' ] ) - LOGGER.debug( 'Refreshing file %s: State is %s/action %s', - file_name, - file_state.state, - action ) - - if action == lsp.ServerFileState.OPEN_FILE: - msg = lsp.DidOpenTextDocument( file_state, - file_data[ 'filetypes' ], - file_data[ 'contents' ] ) - - self.GetConnection().SendNotification( msg ) - elif action == lsp.ServerFileState.CHANGE_FILE: - # FIXME: DidChangeTextDocument doesn't actually do anything - # different from 
DidOpenTextDocument other than send the right - # message, because we don't actually have a mechanism for generating - # the diffs. This isn't strictly necessary, but might lead to - # performance problems. - msg = lsp.DidChangeTextDocument( file_state, file_data[ 'contents' ] ) - - self.GetConnection().SendNotification( msg ) def _UpdateSavedFilesUnderLock( self, request_data ): From d6d4b625705b1f74efd4c53fcbdfc7ad13fb1d3d Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Sun, 9 Aug 2020 17:14:04 +0100 Subject: [PATCH 04/10] FixUp: Log message --- ycmd/handlers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ycmd/handlers.py b/ycmd/handlers.py index 83708b5a18..3c60131e70 100644 --- a/ycmd/handlers.py +++ b/ycmd/handlers.py @@ -196,7 +196,7 @@ def GetSemanticTokens(): request_data[ 'filetypes' ] ) semantic_tokens = filetype_completer.ComputeSemanticTokens( request_data ) except Exception as exception: - LOGGER.exception( 'Exception from semantic completer during sig help' ) + LOGGER.exception( 'Exception from semantic completer during tokens request' ) errors = [ BuildExceptionResponse( exception, traceback.format_exc() ) ] # No fallback for signature help. 
The general completer is unlikely to be able From 5aa52b507525b7fc93c635b1a97d81852fe012d5 Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Mon, 5 Apr 2021 18:19:57 +0100 Subject: [PATCH 05/10] Try and avoid errors for file changing, increase timeout and decode modifiers (probably) --- .../language_server_completer.py | 54 ++++++++++++++----- .../language_server_protocol.py | 1 + 2 files changed, 42 insertions(+), 13 deletions(-) diff --git a/ycmd/completers/language_server/language_server_completer.py b/ycmd/completers/language_server/language_server_completer.py index 4a26b2052e..79b2c952ce 100644 --- a/ycmd/completers/language_server/language_server_completer.py +++ b/ycmd/completers/language_server/language_server_completer.py @@ -137,7 +137,11 @@ class ResponseAbortedException( Exception ): class ResponseFailedException( Exception ): """Raised by LanguageServerConnection if a request returns an error""" - pass # pragma: no cover + def __init__( self, error ): + self.error_code = error.get( 'code' ) or 0 + self.error_message = error.get( 'message' ) or "No message" + super().__init__( f'Request failed: { self.error_code }: ' + f'{ self.error_message }' ) class IncompatibleCompletionException( Exception ): @@ -212,11 +216,7 @@ def AwaitResponse( self, timeout ): if 'error' in self._message: error = self._message[ 'error' ] - raise ResponseFailedException( - 'Request failed: ' - f'{ error.get( "code" ) or 0 }' - ': ' - f'{ error.get( "message" ) or "No message" }' ) + raise ResponseFailedException( error ) return self._message @@ -1559,12 +1559,23 @@ def ComputeSemanticTokens( self, request_data ): return {} request_id = self.GetConnection().NextRequestId() - response = self._connection.GetResponse( - request_id, - lsp.SemanticTokens( - request_id, - request_data ), - REQUEST_TIMEOUT_COMPLETION ) + + # Retry up to 3 times to avoid ContentModified errors + MAX_RETRY = 3 + for i in range( MAX_RETRY ): + try: + response = self._connection.GetResponse( + request_id, 
+ lsp.SemanticTokens( + request_id, + request_data ), + 3 * REQUEST_TIMEOUT_COMPLETION ) + break + except ResponseFailedException as e: + if i < ( MAX_RETRY - 1 ) and e.error_code == lsp.Errors.ContentModified: + continue + else: + raise if response is None: return {} @@ -3411,6 +3422,23 @@ class Token: token_type = 0 token_modifiers = 0 + def DecodeModifiers( self, tokenModifiers ): + modifiers = [] + bit_index = 0 + while True: + bit_value = pow( 2, bit_index ) + + if bit_value > self.token_modifiers: + break + + if self.token_modifiers & bit_value: + modifiers.append( tokenModifiers[ bit_index ] ) + + bit_index += 1 + + return modifiers + + last_token = Token() tokens = [] @@ -3444,7 +3472,7 @@ class Token: } ) ), 'type': atlas.tokenTypes[ token.token_type ], - 'modifiers': [] # TODO: bits represent indexes in atlas + 'modifiers': token.DecodeModifiers( atlas.tokenModifiers ) } ) last_token = token diff --git a/ycmd/completers/language_server/language_server_protocol.py b/ycmd/completers/language_server/language_server_protocol.py index b9e2a059cd..9497644f83 100644 --- a/ycmd/completers/language_server/language_server_protocol.py +++ b/ycmd/completers/language_server/language_server_protocol.py @@ -151,6 +151,7 @@ class Errors: 'enumMember', 'event', 'function', + 'method', 'member', 'macro', 'keyword', From 1d2b0fe034e9093ed91ec0484712b7e906883a24 Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Sat, 27 Nov 2021 11:51:44 +0000 Subject: [PATCH 06/10] Fix flake8 errors --- .../language_server_completer.py | 35 ++++++++++--------- .../language_server_protocol.py | 1 + ycmd/handlers.py | 3 +- 3 files changed, 22 insertions(+), 17 deletions(-) diff --git a/ycmd/completers/language_server/language_server_completer.py b/ycmd/completers/language_server/language_server_completer.py index 79b2c952ce..68fe0d8bbf 100644 --- a/ycmd/completers/language_server/language_server_completer.py +++ b/ycmd/completers/language_server/language_server_completer.py @@ -1560,22 +1560,13 
@@ def ComputeSemanticTokens( self, request_data ): request_id = self.GetConnection().NextRequestId() - # Retry up to 3 times to avoid ContentModified errors - MAX_RETRY = 3 - for i in range( MAX_RETRY ): - try: - response = self._connection.GetResponse( - request_id, - lsp.SemanticTokens( - request_id, - request_data ), - 3 * REQUEST_TIMEOUT_COMPLETION ) - break - except ResponseFailedException as e: - if i < ( MAX_RETRY - 1 ) and e.error_code == lsp.Errors.ContentModified: - continue - else: - raise + body = lsp.SemanticTokens( request_id, request_data ) + + for _ in RetryOnFailure( [ lsp.Errors.ContentModified ] ): + response = self._connection.GetResponse( + request_id, + body, + 3 * REQUEST_TIMEOUT_COMPLETION ) if response is None: return {} @@ -3478,3 +3469,15 @@ def DecodeModifiers( self, tokenModifiers ): last_token = token return tokens + + +def RetryOnFailure( expected_error_codes, num_retries = 3 ): + for i in range( num_retries ): + try: + yield + break + except ResponseFailedException as e: + if i < ( num_retries - 1 ) and e.error_code in expected_error_codes: + continue + else: + raise diff --git a/ycmd/completers/language_server/language_server_protocol.py b/ycmd/completers/language_server/language_server_protocol.py index 9497644f83..98233d7fee 100644 --- a/ycmd/completers/language_server/language_server_protocol.py +++ b/ycmd/completers/language_server/language_server_protocol.py @@ -165,6 +165,7 @@ class Errors: TOKEN_MODIFIERS = [] + class InvalidUriException( Exception ): """Raised when trying to convert a server URI to a file path but the scheme was not supported. 
Only the file: scheme is supported""" diff --git a/ycmd/handlers.py b/ycmd/handlers.py index 3c60131e70..c51c0b6c25 100644 --- a/ycmd/handlers.py +++ b/ycmd/handlers.py @@ -196,7 +196,8 @@ def GetSemanticTokens(): request_data[ 'filetypes' ] ) semantic_tokens = filetype_completer.ComputeSemanticTokens( request_data ) except Exception as exception: - LOGGER.exception( 'Exception from semantic completer during tokens request' ) + LOGGER.exception( + 'Exception from semantic completer during tokens request' ) errors = [ BuildExceptionResponse( exception, traceback.format_exc() ) ] # No fallback for signature help. The general completer is unlikely to be able From 2ca4a460a3641e4d7bf3e2eeb3e3ceb4062c994f Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Mon, 20 Dec 2021 21:46:00 +0000 Subject: [PATCH 07/10] Set up tokens atlas only once --- .../language_server_completer.py | 37 ++++++++++++------- ycmd/handlers.py | 3 +- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/ycmd/completers/language_server/language_server_completer.py b/ycmd/completers/language_server/language_server_completer.py index 68fe0d8bbf..18fde497fe 100644 --- a/ycmd/completers/language_server/language_server_completer.py +++ b/ycmd/completers/language_server/language_server_completer.py @@ -1043,6 +1043,7 @@ def ServerReset( self ): self._project_directory = None self._settings = {} self._extra_conf_dir = None + self._semantic_token_atlas = None def GetCompleterName( self ): @@ -1543,23 +1544,12 @@ def ComputeSemanticTokens( self, request_data ): if not self._ServerIsInitialized(): return {} - self._UpdateServerWithCurrentFileContents( request_data ) - - server_config = self._server_capabilities.get( 'semanticTokensProvider' ) - if server_config is None: + if not self._semantic_token_atlas: return {} - atlas = TokenAtlas( server_config[ 'legend' ] ) - - server_full_support = server_config.get( 'full' ) - if server_full_support == {}: - server_full_support = True - - if not 
server_full_support: - return {} + self._UpdateServerWithCurrentFileContents( request_data ) request_id = self.GetConnection().NextRequestId() - body = lsp.SemanticTokens( request_id, request_data ) for _ in RetryOnFailure( [ lsp.Errors.ContentModified ] ): @@ -1574,7 +1564,7 @@ def ComputeSemanticTokens( self, request_data ): filename = request_data[ 'filepath' ] contents = GetFileLines( request_data, filename ) result = response.get( 'result' ) or {} - tokens = _DecodeSemanticTokens( atlas, + tokens = _DecodeSemanticTokens( self._semantic_token_atlas, result.get( 'data' ) or [], filename, contents ) @@ -2291,6 +2281,21 @@ def GetSignatureTriggerCharacters( self, server_trigger_characters ): return server_trigger_characters + def _SetUpSemanticTokenAtlas( self, capabilities: dict ): + server_config = capabilities.get( 'semanticTokensProvider' ) + if server_config is None: + return + + server_full_support = server_config.get( 'full' ) + if server_full_support == {}: + server_full_support = True + + if not server_full_support: + return + + self._semantic_token_atlas = TokenAtlas( server_config[ 'legend' ] ) + + def _HandleInitializeInPollThread( self, response ): """Called within the context of the LanguageServerConnection's message pump when the initialize request receives a response.""" @@ -2308,6 +2313,8 @@ def _HandleInitializeInPollThread( self, response ): self._is_completion_provider = ( 'completionProvider' in self._server_capabilities ) + self._SetUpSemanticTokenAtlas( self._server_capabilities ) + if 'textDocumentSync' in self._server_capabilities: sync = self._server_capabilities[ 'textDocumentSync' ] SYNC_TYPE = [ @@ -3404,6 +3411,8 @@ def __init__( self, legend ): def _DecodeSemanticTokens( atlas, token_data, filename, contents ): + # We decode the tokens on the server because that's not blocking the user, + # whereas decoding in the client would be. 
assert len( token_data ) % 5 == 0 class Token: diff --git a/ycmd/handlers.py b/ycmd/handlers.py index c51c0b6c25..3c60131e70 100644 --- a/ycmd/handlers.py +++ b/ycmd/handlers.py @@ -196,8 +196,7 @@ def GetSemanticTokens(): request_data[ 'filetypes' ] ) semantic_tokens = filetype_completer.ComputeSemanticTokens( request_data ) except Exception as exception: - LOGGER.exception( - 'Exception from semantic completer during tokens request' ) + LOGGER.exception( 'Exception from semantic completer during tokens request' ) errors = [ BuildExceptionResponse( exception, traceback.format_exc() ) ] # No fallback for signature help. The general completer is unlikely to be able From 7211a268fb39208f14e64aa7bd9d62e1d9020632 Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Fri, 10 Jun 2022 20:02:40 +0100 Subject: [PATCH 08/10] Fix flake8 --- ycmd/handlers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ycmd/handlers.py b/ycmd/handlers.py index 3c60131e70..c51c0b6c25 100644 --- a/ycmd/handlers.py +++ b/ycmd/handlers.py @@ -196,7 +196,8 @@ def GetSemanticTokens(): request_data[ 'filetypes' ] ) semantic_tokens = filetype_completer.ComputeSemanticTokens( request_data ) except Exception as exception: - LOGGER.exception( 'Exception from semantic completer during tokens request' ) + LOGGER.exception( + 'Exception from semantic completer during tokens request' ) errors = [ BuildExceptionResponse( exception, traceback.format_exc() ) ] # No fallback for signature help. 
The general completer is unlikely to be able From 5313d6fe88d874fe324dd9b885b8d8b8a04ae8c4 Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Sat, 11 Jun 2022 14:43:46 +0100 Subject: [PATCH 09/10] Add a basic test --- .../clangd/semantic_highlighting_test.py | 166 ++++++++++++++++++ 1 file changed, 166 insertions(+) create mode 100644 ycmd/tests/clangd/semantic_highlighting_test.py diff --git a/ycmd/tests/clangd/semantic_highlighting_test.py b/ycmd/tests/clangd/semantic_highlighting_test.py new file mode 100644 index 0000000000..0daf524448 --- /dev/null +++ b/ycmd/tests/clangd/semantic_highlighting_test.py @@ -0,0 +1,166 @@ +# Copyright (C) 2021 ycmd contributors +# +# This file is part of ycmd. +# +# ycmd is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ycmd is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with ycmd. If not, see . + +import json +import requests +from unittest import TestCase +from hamcrest import assert_that, contains, empty, equal_to, has_entries + +from ycmd.tests.clangd import setUpModule, tearDownModule # noqa +from ycmd.tests.clangd import PathToTestFile, SharedYcmd, IsolatedYcmd +from ycmd.tests.test_utils import ( BuildRequest, + CombineRequest, + RangeMatcher, + WaitUntilCompleterServerReady ) +from ycmd.utils import ReadFile + + +def RunTest( app, test ): + """ + Method to run a simple completion test and verify the result + + Note: Compile commands are extracted from a compile_flags.txt file by clangd + by iteratively looking at the directory containing the source file and its + ancestors. 
+ + test is a dictionary containing: + 'request': kwargs for BuildRequest + 'expect': { + 'response': server response code (e.g. requests.codes.ok) + 'data': matcher for the server response json + } + """ + + request = test[ 'request' ] + filetype = request.get( 'filetype', 'cpp' ) + if 'contents' not in request: + contents = ReadFile( request[ 'filepath' ] ) + request[ 'contents' ] = contents + request[ 'filetype' ] = filetype + + # Because we aren't testing this command, we *always* ignore errors. This + # is mainly because we (may) want to test scenarios where the completer + # throws an exception and the easiest way to do that is to throw from + # within the Settings function. + app.post_json( '/event_notification', + CombineRequest( request, { + 'event_name': 'FileReadyToParse', + 'filetype': filetype + } ), + expect_errors = True ) + WaitUntilCompleterServerReady( app, filetype ) + + # We also ignore errors here, but then we check the response code ourself. + # This is to allow testing of requests returning errors. 
+ response = app.post_json( '/semantic_tokens', + BuildRequest( **request ), + expect_errors = True ) + + assert_that( response.status_code, + equal_to( test[ 'expect' ][ 'response' ] ) ) + + print( f'Completer response: { json.dumps( response.json, indent = 2 ) }' ) + + assert_that( response.json, test[ 'expect' ][ 'data' ] ) + + +class SignatureHelpTest( TestCase ): + @IsolatedYcmd + def test_none( self, app ): + RunTest( app, { + 'description': 'trigger after (', + 'request': { + 'filetype': 'cpp', + 'filepath': PathToTestFile( 'tokens.manual.cpp' ), + 'contents': '' + }, + 'expect': { + 'response': requests.codes.ok, + 'data': has_entries( { + 'errors': empty(), + 'semantic_tokens': has_entries( { + 'tokens': empty() + } ), + } ) + }, + } ) + + + @SharedYcmd + def test_basic( self, app ): + RunTest( app, { + 'description': 'trigger after (', + 'request': { + 'filetype' : 'cpp', + 'filepath' : PathToTestFile( 'tokens.manual.cpp' ), + 'contents': '#define MACRO( x, y ) do { ( x ) = ( y ); } while (0)' + }, + 'expect': { + 'response': requests.codes.ok, + 'data': has_entries( { + 'errors': empty(), + 'semantic_tokens': has_entries( { + 'tokens': contains( + has_entries( { + 'range': RangeMatcher( PathToTestFile( 'tokens.manual.cpp' ), + ( 1, 9 ), + ( 1, 14 ) ), + 'type': 'macro', + 'modifiers': contains( 'declaration', 'globalScope' ) + } ) + ) + } ), + } ) + }, + } ) + + + @SharedYcmd + def test_multiple( self, app ): + RunTest( app, { + 'description': 'trigger after (', + 'request': { + 'filetype' : 'cpp', + 'filepath' : PathToTestFile( 'tokens.manual.cpp' ), + 'contents': + '#define MACRO( x, y ) ( x );\n\nnamespace Test {}' + }, + 'expect': { + 'response': requests.codes.ok, + 'data': has_entries( { + 'errors': empty(), + 'semantic_tokens': has_entries( { + 'tokens': contains( + has_entries( { + 'range': RangeMatcher( PathToTestFile( 'tokens.manual.cpp' ), + ( 1, 9 ), + ( 1, 14 ) ), + 'type': 'macro', + 'modifiers': contains( 'declaration', 'globalScope' ) + } 
), + has_entries( { + 'range': RangeMatcher( PathToTestFile( 'tokens.manual.cpp' ), + ( 3, 11 ), + ( 3, 15 ) ), + 'type': 'namespace', + 'modifiers': contains( 'declaration', 'globalScope' ) + } ) + ) + } ), + } ) + }, + } ) From 748114cf92ba9cf1214862ddf84ae03dbf3aed9a Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Sat, 11 Jun 2022 14:55:50 +0100 Subject: [PATCH 10/10] Coverage tests --- .../clangd/semantic_highlighting_test.py | 3 - ycmd/tests/go/semantic_highlighting_test.py | 91 ++++++++++++++++++ .../python/semantic_highlighting_test.py | 92 +++++++++++++++++++ 3 files changed, 183 insertions(+), 3 deletions(-) create mode 100644 ycmd/tests/go/semantic_highlighting_test.py create mode 100644 ycmd/tests/python/semantic_highlighting_test.py diff --git a/ycmd/tests/clangd/semantic_highlighting_test.py b/ycmd/tests/clangd/semantic_highlighting_test.py index 0daf524448..875c6269db 100644 --- a/ycmd/tests/clangd/semantic_highlighting_test.py +++ b/ycmd/tests/clangd/semantic_highlighting_test.py @@ -82,7 +82,6 @@ class SignatureHelpTest( TestCase ): @IsolatedYcmd def test_none( self, app ): RunTest( app, { - 'description': 'trigger after (', 'request': { 'filetype': 'cpp', 'filepath': PathToTestFile( 'tokens.manual.cpp' ), @@ -103,7 +102,6 @@ def test_none( self, app ): @SharedYcmd def test_basic( self, app ): RunTest( app, { - 'description': 'trigger after (', 'request': { 'filetype' : 'cpp', 'filepath' : PathToTestFile( 'tokens.manual.cpp' ), @@ -132,7 +130,6 @@ def test_basic( self, app ): @SharedYcmd def test_multiple( self, app ): RunTest( app, { - 'description': 'trigger after (', 'request': { 'filetype' : 'cpp', 'filepath' : PathToTestFile( 'tokens.manual.cpp' ), diff --git a/ycmd/tests/go/semantic_highlighting_test.py b/ycmd/tests/go/semantic_highlighting_test.py new file mode 100644 index 0000000000..ce2ecf2f0d --- /dev/null +++ b/ycmd/tests/go/semantic_highlighting_test.py @@ -0,0 +1,91 @@ +# Copyright (C) 2022 ycmd contributors +# +# This file is part of 
ycmd. +# +# ycmd is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ycmd is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with ycmd. If not, see . + +import json +import requests +from unittest import TestCase +from hamcrest import assert_that, empty, equal_to, has_entries + +from ycmd.tests.go import setUpModule, tearDownModule # noqa +from ycmd.tests.go import PathToTestFile, SharedYcmd +from ycmd.tests.test_utils import ( BuildRequest, + CombineRequest, + WaitUntilCompleterServerReady ) +from ycmd.utils import ReadFile + + +def RunTest( app, test ): + """ + Method to run a simple completion test and verify the result + + test is a dictionary containing: + 'request': kwargs for BuildRequest + 'expect': { + 'response': server response code (e.g. requests.codes.ok) + 'data': matcher for the server response json + } + """ + + request = test[ 'request' ] + filetype = request.get( 'filetype', 'go' ) + if 'contents' not in request: + contents = ReadFile( request[ 'filepath' ] ) + request[ 'contents' ] = contents + request[ 'filetype' ] = filetype + + # Because we aren't testing this command, we *always* ignore errors. This + # is mainly because we (may) want to test scenarios where the completer + # throws an exception and the easiest way to do that is to throw from + # within the Settings function. 
+ app.post_json( '/event_notification', + CombineRequest( request, { + 'event_name': 'FileReadyToParse', + 'filetype': filetype + } ), + expect_errors = True ) + WaitUntilCompleterServerReady( app, filetype ) + + # We also ignore errors here, but then we check the response code ourself. + # This is to allow testing of requests returning errors. + response = app.post_json( '/semantic_tokens', + BuildRequest( **request ), + expect_errors = True ) + + assert_that( response.status_code, + equal_to( test[ 'expect' ][ 'response' ] ) ) + + print( f'Completer response: { json.dumps( response.json, indent = 2 ) }' ) + + assert_that( response.json, test[ 'expect' ][ 'data' ] ) + + +class SignatureHelpTest( TestCase ): + @SharedYcmd + def test_none( self, app ): + RunTest( app, { + 'request': { + 'filetype': 'go', + 'filepath': PathToTestFile( 'thing.go' ) + }, + 'expect': { + 'response': requests.codes.ok, + 'data': has_entries( { + 'errors': empty(), + 'semantic_tokens': empty() + } ) + }, + } ) diff --git a/ycmd/tests/python/semantic_highlighting_test.py b/ycmd/tests/python/semantic_highlighting_test.py new file mode 100644 index 0000000000..88bd0e3264 --- /dev/null +++ b/ycmd/tests/python/semantic_highlighting_test.py @@ -0,0 +1,92 @@ +# Copyright (C) 2022 ycmd contributors +# +# This file is part of ycmd. +# +# ycmd is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ycmd is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with ycmd. If not, see . 
+ +import json +import requests +from unittest import TestCase +from hamcrest import assert_that, empty, equal_to, has_entries + +from ycmd.tests.python import setUpModule # noqa +from ycmd.tests.python import PathToTestFile, SharedYcmd +from ycmd.tests.test_utils import ( BuildRequest, + CombineRequest, + WaitUntilCompleterServerReady ) +from ycmd.utils import ReadFile + + +def RunTest( app, test ): + """ + Method to run a simple completion test and verify the result + + test is a dictionary containing: + 'request': kwargs for BuildRequest + 'expect': { + 'response': server response code (e.g. requests.codes.ok) + 'data': matcher for the server response json + } + """ + + request = test[ 'request' ] + filetype = request.get( 'filetype', 'python' ) + if 'contents' not in request: + contents = ReadFile( request[ 'filepath' ] ) + request[ 'contents' ] = contents + request[ 'filetype' ] = filetype + + # Because we aren't testing this command, we *always* ignore errors. This + # is mainly because we (may) want to test scenarios where the completer + # throws an exception and the easiest way to do that is to throw from + # within the Settings function. + app.post_json( '/event_notification', + CombineRequest( request, { + 'event_name': 'FileReadyToParse', + 'filetype': filetype + } ), + expect_errors = True ) + WaitUntilCompleterServerReady( app, filetype ) + + # We also ignore errors here, but then we check the response code ourself. + # This is to allow testing of requests returning errors. 
+ response = app.post_json( '/semantic_tokens', + BuildRequest( **request ), + expect_errors = True ) + + assert_that( response.status_code, + equal_to( test[ 'expect' ][ 'response' ] ) ) + + print( f'Completer response: { json.dumps( response.json, indent = 2 ) }' ) + + assert_that( response.json, test[ 'expect' ][ 'data' ] ) + + +class SignatureHelpTest( TestCase ): + @SharedYcmd + def test_none( self, app ): + RunTest( app, { + 'request': { + 'filetype': 'python', + 'filepath': PathToTestFile( 'tokens.test.py' ), + 'contents': 'if 1:\n pass' + }, + 'expect': { + 'response': requests.codes.ok, + 'data': has_entries( { + 'errors': empty(), + 'semantic_tokens': empty() + } ) + }, + } )