Merge pull request ycm-core#1643 from puremourning/semantic-tokens
Support for semantic highlighting (experimental)
mergify[bot] authored Jun 11, 2022
2 parents 20bfbbf + 748114c commit 1b6181b
Showing 8 changed files with 638 additions and 49 deletions.
4 changes: 4 additions & 0 deletions ycmd/completers/completer.py
@@ -369,6 +369,10 @@ def ComputeSignaturesInner( self, request_data ):
return {}


def ComputeSemanticTokens( self, request_data ):
return {}


def DefinedSubcommands( self ):
subcommands = sorted( self.GetSubcommandsMap().keys() )
try:
220 changes: 189 additions & 31 deletions ycmd/completers/language_server/language_server_completer.py
@@ -137,7 +137,11 @@ class ResponseAbortedException( Exception ):

class ResponseFailedException( Exception ):
"""Raised by LanguageServerConnection if a request returns an error"""
pass # pragma: no cover
def __init__( self, error ):
self.error_code = error.get( 'code' ) or 0
self.error_message = error.get( 'message' ) or "No message"
super().__init__( f'Request failed: { self.error_code }: '
f'{ self.error_message }' )


class IncompatibleCompletionException( Exception ):
@@ -212,11 +216,7 @@ def AwaitResponse( self, timeout ):

if 'error' in self._message:
error = self._message[ 'error' ]
raise ResponseFailedException(
'Request failed: '
f'{ error.get( "code" ) or 0 }'
': '
f'{ error.get( "message" ) or "No message" }' )
raise ResponseFailedException( error )

return self._message

@@ -1043,6 +1043,7 @@ def ServerReset( self ):
self._project_directory = None
self._settings = {}
self._extra_conf_dir = None
self._semantic_token_atlas = None


def GetCompleterName( self ):
@@ -1279,7 +1280,7 @@ def ComputeCandidatesInner( self, request_data, codepoint ):
if not self._is_completion_provider:
return None, False

self._UpdateServerWithFileContents( request_data )
self._UpdateServerWithCurrentFileContents( request_data )

request_id = self.GetConnection().NextRequestId()

@@ -1496,18 +1497,18 @@ def SignatureHelpAvailable( self ):
else:
return responses.SignatureHelpAvailalability.NOT_AVAILABLE


def ComputeSignaturesInner( self, request_data ):
if not self.ServerIsReady():
return {}

if not self._server_capabilities.get( 'signatureHelpProvider' ):
return {}

self._UpdateServerWithFileContents( request_data )
self._UpdateServerWithCurrentFileContents( request_data )

request_id = self.GetConnection().NextRequestId()
msg = lsp.SignatureHelp( request_id, request_data )

response = self.GetConnection().GetResponse( request_id,
msg,
REQUEST_TIMEOUT_COMPLETION )
@@ -1536,6 +1537,43 @@ def ComputeSignaturesInner( self, request_data ):
return result


def ComputeSemanticTokens( self, request_data ):
if not self._initialize_event.wait( REQUEST_TIMEOUT_COMPLETION ):
return {}

if not self._ServerIsInitialized():
return {}

if not self._semantic_token_atlas:
return {}

self._UpdateServerWithCurrentFileContents( request_data )

request_id = self.GetConnection().NextRequestId()
body = lsp.SemanticTokens( request_id, request_data )

for _ in RetryOnFailure( [ lsp.Errors.ContentModified ] ):
response = self._connection.GetResponse(
request_id,
body,
3 * REQUEST_TIMEOUT_COMPLETION )

if response is None:
return {}

filename = request_data[ 'filepath' ]
contents = GetFileLines( request_data, filename )
result = response.get( 'result' ) or {}
tokens = _DecodeSemanticTokens( self._semantic_token_atlas,
result.get( 'data' ) or [],
filename,
contents )

return {
'tokens': tokens
}
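
For reference, a minimal sketch of the value ComputeSemanticTokens returns for a single decoded token. It assumes ycmd's usual range wire format (filepath, 1-based line_num and column_num) as produced by responses.BuildRangeData; the path, positions, type and modifiers here are purely illustrative:

# Illustrative only: one decoded token as returned to the client.
{
  'tokens': [ {
    'range': {
      'start': { 'filepath': '/path/to/file.cc', 'line_num': 10, 'column_num': 3 },
      'end':   { 'filepath': '/path/to/file.cc', 'line_num': 10, 'column_num': 11 }
    },
    'type': 'function',
    'modifiers': [ 'declaration' ]
  } ]
}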


def GetDetailedDiagnostic( self, request_data ):
self._UpdateServerWithFileContents( request_data )

@@ -1991,6 +2029,14 @@ def _AnySupportedFileType( self, file_types ):
return False


def _UpdateServerWithCurrentFileContents( self, request_data ):
file_name = request_data[ 'filepath' ]
contents = GetFileContents( request_data, file_name )
filetypes = request_data[ 'filetypes' ]
with self._server_info_mutex:
self._RefreshFileContentsUnderLock( file_name, contents, filetypes )


def _UpdateServerWithFileContents( self, request_data ):
"""Update the server with the current contents of all open buffers, and
close any buffers no longer open.
@@ -2003,6 +2049,32 @@ def _UpdateServerWithFileContents( self, request_data ):
self._PurgeMissingFilesUnderLock( files_to_purge )


def _RefreshFileContentsUnderLock( self, file_name, contents, file_types ):
file_state = self._server_file_state[ file_name ]
action = file_state.GetDirtyFileAction( contents )

LOGGER.debug( 'Refreshing file %s: State is %s/action %s',
file_name,
file_state.state,
action )

if action == lsp.ServerFileState.OPEN_FILE:
msg = lsp.DidOpenTextDocument( file_state,
file_types,
contents )

self.GetConnection().SendNotification( msg )
elif action == lsp.ServerFileState.CHANGE_FILE:
# FIXME: DidChangeTextDocument doesn't actually do anything
# different from DidOpenTextDocument other than send the right
# message, because we don't actually have a mechanism for generating
# the diffs. This isn't strictly necessary, but might lead to
# performance problems.
msg = lsp.DidChangeTextDocument( file_state, contents )

self.GetConnection().SendNotification( msg )


def _UpdateDirtyFilesUnderLock( self, request_data ):
for file_name, file_data in request_data[ 'file_data' ].items():
if not self._AnySupportedFileType( file_data[ 'filetypes' ] ):
@@ -2013,29 +2085,10 @@ def _UpdateDirtyFilesUnderLock( self, request_data ):
self.SupportedFiletypes() )
continue

file_state = self._server_file_state[ file_name ]
action = file_state.GetDirtyFileAction( file_data[ 'contents' ] )

LOGGER.debug( 'Refreshing file %s: State is %s/action %s',
file_name,
file_state.state,
action )
self._RefreshFileContentsUnderLock( file_name,
file_data[ 'contents' ],
file_data[ 'filetypes' ] )

if action == lsp.ServerFileState.OPEN_FILE:
msg = lsp.DidOpenTextDocument( file_state,
file_data[ 'filetypes' ],
file_data[ 'contents' ] )

self.GetConnection().SendNotification( msg )
elif action == lsp.ServerFileState.CHANGE_FILE:
# FIXME: DidChangeTextDocument doesn't actually do anything
# different from DidOpenTextDocument other than send the right
# message, because we don't actually have a mechanism for generating
# the diffs. This isn't strictly necessary, but might lead to
# performance problems.
msg = lsp.DidChangeTextDocument( file_state, file_data[ 'contents' ] )

self.GetConnection().SendNotification( msg )


def _UpdateSavedFilesUnderLock( self, request_data ):
@@ -2228,6 +2281,21 @@ def GetSignatureTriggerCharacters( self, server_trigger_characters ):
return server_trigger_characters


def _SetUpSemanticTokenAtlas( self, capabilities: dict ):
server_config = capabilities.get( 'semanticTokensProvider' )
if server_config is None:
return

server_full_support = server_config.get( 'full' )
if server_full_support == {}:
server_full_support = True

if not server_full_support:
return

self._semantic_token_atlas = TokenAtlas( server_config[ 'legend' ] )
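
The atlas is built from the semanticTokensProvider capability that the server advertises in its initialize response. A minimal sketch of such a capability fragment, following the LSP specification; the legend values are illustrative and differ per language server:

# Illustrative only: a capability fragment a server might advertise.
# Per the LSP spec, 'full' may be True, {} or a dict like { 'delta': True };
# the code above treats an empty dict the same as True.
capabilities = {
  'semanticTokensProvider': {
    'legend': {
      'tokenTypes': [ 'namespace', 'class', 'function', 'variable' ],
      'tokenModifiers': [ 'declaration', 'readonly' ]
    },
    'full': {},
    'range': True
  }
}
# _SetUpSemanticTokenAtlas( capabilities ) then stores a TokenAtlas wrapping
# the legend, which _DecodeSemanticTokens uses to name types and modifiers.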


def _HandleInitializeInPollThread( self, response ):
"""Called within the context of the LanguageServerConnection's message pump
when the initialize request receives a response."""
@@ -2245,6 +2313,8 @@ def _HandleInitializeInPollThread( self, response ):
self._is_completion_provider = (
'completionProvider' in self._server_capabilities )

self._SetUpSemanticTokenAtlas( self._server_capabilities )

if 'textDocumentSync' in self._server_capabilities:
sync = self._server_capabilities[ 'textDocumentSync' ]
SYNC_TYPE = [
@@ -3332,3 +3402,91 @@ def on_deleted( self, event ):
with self._server._server_info_mutex:
msg = lsp.DidChangeWatchedFiles( event.src_path, 'delete' )
self._server.GetConnection().SendNotification( msg )


class TokenAtlas:
def __init__( self, legend ):
self.tokenTypes = legend[ 'tokenTypes' ]
self.tokenModifiers = legend[ 'tokenModifiers' ]


def _DecodeSemanticTokens( atlas, token_data, filename, contents ):
# We decode the tokens on the server because that's not blocking the user,
# whereas decoding in the client would be.
assert len( token_data ) % 5 == 0

class Token:
line = 0
start_character = 0
num_characters = 0
token_type = 0
token_modifiers = 0

def DecodeModifiers( self, tokenModifiers ):
modifiers = []
bit_index = 0
while True:
bit_value = pow( 2, bit_index )

if bit_value > self.token_modifiers:
break

if self.token_modifiers & bit_value:
modifiers.append( tokenModifiers[ bit_index ] )

bit_index += 1

return modifiers


last_token = Token()
tokens = []

for token_index in range( 0, len( token_data ), 5 ):
token = Token()

token.line = last_token.line + token_data[ token_index ]

token.start_character = token_data[ token_index + 1 ]
if token.line == last_token.line:
token.start_character += last_token.start_character

token.num_characters = token_data[ token_index + 2 ]

token.token_type = token_data[ token_index + 3 ]
token.token_modifiers = token_data[ token_index + 4 ]

tokens.append( {
'range': responses.BuildRangeData( _BuildRange(
contents,
filename,
{
'start': {
'line': token.line,
'character': token.start_character,
},
'end': {
'line': token.line,
'character': token.start_character + token.num_characters,
}
}
) ),
'type': atlas.tokenTypes[ token.token_type ],
'modifiers': token.DecodeModifiers( atlas.tokenModifiers )
} )

last_token = token

return tokens
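
The raw data decoded above uses the LSP relative encoding: each token is five integers (line delta, start-character delta, length, token type index, token-modifier bit mask), with the deltas taken relative to the previous token. A standalone sketch of the same decoding, with a purely illustrative legend and data:

# Illustrative only: decoding two tokens from the LSP quintuple encoding.
legend_types = [ 'namespace', 'function', 'variable' ]
data = [ 2, 4, 3, 1, 1,    # line +2, char 4, len 3, type 1, modifiers 0b01
         0, 10, 5, 2, 3 ]  # same line, char 4 + 10, len 5, type 2, modifiers 0b11

line = character = 0
for i in range( 0, len( data ), 5 ):
  delta_line, delta_char, length, token_type, modifiers = data[ i : i + 5 ]
  character = delta_char + ( character if delta_line == 0 else 0 )
  line += delta_line
  print( line, character, length, legend_types[ token_type ], bin( modifiers ) )
# prints: 2 4 3 function 0b1
#         2 14 5 variable 0b11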


def RetryOnFailure( expected_error_codes, num_retries = 3 ):
for i in range( num_retries ):
try:
yield
break
except ResponseFailedException as e:
if i < ( num_retries - 1 ) and e.error_code in expected_error_codes:
continue
else:
raise