diff --git a/__pycache__/main.cpython-312.pyc b/__pycache__/main.cpython-312.pyc
index 497c75c..eb53ba1 100644
Binary files a/__pycache__/main.cpython-312.pyc and b/__pycache__/main.cpython-312.pyc differ
diff --git a/__pycache__/stt.cpython-312.pyc b/__pycache__/stt.cpython-312.pyc
index 8a6f192..7dcd8cb 100644
Binary files a/__pycache__/stt.cpython-312.pyc and b/__pycache__/stt.cpython-312.pyc differ
diff --git a/hackaton/bin/dotenv b/hackaton/bin/dotenv
new file mode 100755
index 0000000..1d301ce
--- /dev/null
+++ b/hackaton/bin/dotenv
@@ -0,0 +1,8 @@
+#!/Users/hellosj00/dev/AI/hackaton/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from dotenv.__main__ import cli
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(cli())
diff --git a/hackaton/bin/watchfiles b/hackaton/bin/watchfiles
new file mode 100755
index 0000000..af4bfe3
--- /dev/null
+++ b/hackaton/bin/watchfiles
@@ -0,0 +1,8 @@
+#!/Users/hellosj00/dev/AI/hackaton/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from watchfiles.cli import cli
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(cli())
diff --git a/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/INSTALLER b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/LICENSE b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/LICENSE
new file mode 100644
index 0000000..2f1b8e1
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2017-2021 Ingy döt Net
+Copyright (c) 2006-2016 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
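The two console scripts above are standard pip-generated entry points: each rewrites `sys.argv[0]` and hands control to the package's `cli` callable. For reference, the library-side equivalent of what the `dotenv` script drives (an illustrative sketch, not part of this diff; the file name and key are hypothetical) is:

```python
# Illustrative only: the same operations the `dotenv` CLI performs,
# done directly through the python-dotenv API vendored in this diff.
import os

from dotenv import dotenv_values, load_dotenv, set_key

set_key(".env", "API_KEY", "secret")  # writes API_KEY='secret' into .env
print(dotenv_values(".env"))          # {'API_KEY': 'secret'}
load_dotenv(".env")                   # exports the values into os.environ
print(os.environ["API_KEY"])          # hypothetical key, for demonstration
```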
diff --git a/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/METADATA b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/METADATA
new file mode 100644
index 0000000..db029b7
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/METADATA
@@ -0,0 +1,46 @@
+Metadata-Version: 2.1
+Name: PyYAML
+Version: 6.0.2
+Summary: YAML parser and emitter for Python
+Home-page: https://pyyaml.org/
+Download-URL: https://pypi.org/project/PyYAML/
+Author: Kirill Simonov
+Author-email: xi@resolvent.net
+License: MIT
+Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues
+Project-URL: CI, https://github.com/yaml/pyyaml/actions
+Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation
+Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core
+Project-URL: Source Code, https://github.com/yaml/pyyaml
+Platform: Any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Cython
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Requires-Python: >=3.8
+License-File: LICENSE
+
+YAML is a data serialization format designed for human readability
+and interaction with scripting languages. PyYAML is a YAML parser
+and emitter for Python.
+
+PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+support, capable extension API, and sensible error messages. PyYAML
+supports standard YAML tags and provides Python-specific tags that
+allow representing arbitrary Python objects.
+
+PyYAML is applicable for a broad range of tasks from complex
+configuration files to object serialization and persistence.
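The METADATA above summarizes what PyYAML does (a YAML 1.1 parser and emitter). A minimal round-trip sketch (illustrative only, not part of this diff; the document contents are made up):

```python
# Parse a YAML string into Python objects and emit it back.
import yaml

doc = yaml.safe_load("name: demo\nports:\n  - 80\n  - 443\n")
assert doc == {"name": "demo", "ports": [80, 443]}
print(yaml.safe_dump(doc, sort_keys=False))
```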
diff --git a/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/RECORD b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/RECORD
new file mode 100644
index 0000000..1a9fb6b
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/RECORD
@@ -0,0 +1,43 @@
+PyYAML-6.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+PyYAML-6.0.2.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101
+PyYAML-6.0.2.dist-info/METADATA,sha256=9-odFB5seu4pGPcEv7E8iyxNF51_uKnaNGjLAhz2lto,2060
+PyYAML-6.0.2.dist-info/RECORD,,
+PyYAML-6.0.2.dist-info/WHEEL,sha256=h9jBNgvnuEaix45NgESHvfNcOPGGNEywrbP9Un7hZlk,110
+PyYAML-6.0.2.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11
+_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402
+_yaml/__pycache__/__init__.cpython-312.pyc,,
+yaml/__init__.py,sha256=N35S01HMesFTe0aRRMWkPj0Pa8IEbHpE9FK7cr5Bdtw,12311
+yaml/__pycache__/__init__.cpython-312.pyc,,
+yaml/__pycache__/composer.cpython-312.pyc,,
+yaml/__pycache__/constructor.cpython-312.pyc,,
+yaml/__pycache__/cyaml.cpython-312.pyc,,
+yaml/__pycache__/dumper.cpython-312.pyc,,
+yaml/__pycache__/emitter.cpython-312.pyc,,
+yaml/__pycache__/error.cpython-312.pyc,,
+yaml/__pycache__/events.cpython-312.pyc,,
+yaml/__pycache__/loader.cpython-312.pyc,,
+yaml/__pycache__/nodes.cpython-312.pyc,,
+yaml/__pycache__/parser.cpython-312.pyc,,
+yaml/__pycache__/reader.cpython-312.pyc,,
+yaml/__pycache__/representer.cpython-312.pyc,,
+yaml/__pycache__/resolver.cpython-312.pyc,,
+yaml/__pycache__/scanner.cpython-312.pyc,,
+yaml/__pycache__/serializer.cpython-312.pyc,,
+yaml/__pycache__/tokens.cpython-312.pyc,,
+yaml/_yaml.cpython-312-darwin.so,sha256=kJhYpZpKSMBUaR-esOCOg7Ao1QLyNGl3tVdptwNUd6Y,375656
+yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883
+yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639
+yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851
+yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837
+yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006
+yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533
+yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
+yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061
+yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
+yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495
+yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794
+yaml/representer.py,sha256=IuWP-cAW9sHKEnS0gCqSa894k1Bg4cgTxaDwIcbRQ-Y,14190
+yaml/resolver.py,sha256=9L-VYfm4mWHxUD1Vg4X7rjDRK_7VZd6b92wzq7Y2IKY,9004
+yaml/scanner.py,sha256=YEM3iLZSaQwXcQRg2l2R4MdT0zGP2F9eHkKGKnHyWQY,51279
+yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165
+yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573
diff --git a/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/WHEEL b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/WHEEL
new file mode 100644
index 0000000..fff8a43
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.44.0)
+Root-Is-Purelib: false
+Tag: cp312-cp312-macosx_11_0_arm64
+
diff --git a/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/top_level.txt b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/top_level.txt
new file mode 100644
index 0000000..e6475e9
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/PyYAML-6.0.2.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+_yaml
+yaml
diff --git a/hackaton/lib/python3.12/site-packages/_yaml/__init__.py b/hackaton/lib/python3.12/site-packages/_yaml/__init__.py
new file mode 100644
index 0000000..7baa8c4
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/_yaml/__init__.py
@@ -0,0 +1,33 @@
+# This is a stub package designed to roughly emulate the _yaml
+# extension module, which previously existed as a standalone module
+# and has been moved into the `yaml` package namespace.
+# It does not perfectly mimic its old counterpart, but should get
+# close enough for anyone who's relying on it even when they shouldn't.
+import yaml
+
+# in some circumstances, the yaml module we imported may be from a different version, so we need
+# to tread carefully when poking at it here (it may not have the attributes we expect)
+if not getattr(yaml, '__with_libyaml__', False):
+    from sys import version_info
+
+    exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
+    raise exc("No module named '_yaml'")
+else:
+    from yaml._yaml import *
+    import warnings
+    warnings.warn(
+        'The _yaml extension module is now located at yaml._yaml'
+        ' and its location is subject to change. To use the'
+        ' LibYAML-based parser and emitter, import from `yaml`:'
+        ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
+        DeprecationWarning
+    )
+    del warnings
+    # Don't `del yaml` here because yaml is actually an existing
+    # namespace member of _yaml.
+
+__name__ = '_yaml'
+# If the module is top-level (i.e. not a part of any specific package)
+# then the attribute should be set to ''.
+# https://docs.python.org/3.8/library/types.html
+__package__ = ''
diff --git a/hackaton/lib/python3.12/site-packages/_yaml/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/_yaml/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..85f72eb
Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/_yaml/__pycache__/__init__.cpython-312.pyc differ
diff --git a/hackaton/lib/python3.12/site-packages/dotenv/__init__.py b/hackaton/lib/python3.12/site-packages/dotenv/__init__.py
new file mode 100644
index 0000000..7f4c631
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/dotenv/__init__.py
@@ -0,0 +1,49 @@
+from typing import Any, Optional
+
+from .main import (dotenv_values, find_dotenv, get_key, load_dotenv, set_key,
+                   unset_key)
+
+
+def load_ipython_extension(ipython: Any) -> None:
+    from .ipython import load_ipython_extension
+    load_ipython_extension(ipython)
+
+
+def get_cli_string(
+    path: Optional[str] = None,
+    action: Optional[str] = None,
+    key: Optional[str] = None,
+    value: Optional[str] = None,
+    quote: Optional[str] = None,
+):
+    """Returns a string suitable for running as a shell script.
+
+    Useful for converting arguments passed to a fabric task
+    to be passed to a `local` or `run` command.
+ """ + command = ['dotenv'] + if quote: + command.append(f'-q {quote}') + if path: + command.append(f'-f {path}') + if action: + command.append(action) + if key: + command.append(key) + if value: + if ' ' in value: + command.append(f'"{value}"') + else: + command.append(value) + + return ' '.join(command).strip() + + +__all__ = ['get_cli_string', + 'load_dotenv', + 'dotenv_values', + 'get_key', + 'set_key', + 'unset_key', + 'find_dotenv', + 'load_ipython_extension'] diff --git a/hackaton/lib/python3.12/site-packages/dotenv/__main__.py b/hackaton/lib/python3.12/site-packages/dotenv/__main__.py new file mode 100644 index 0000000..3977f55 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/dotenv/__main__.py @@ -0,0 +1,6 @@ +"""Entry point for cli, enables execution with `python -m dotenv`""" + +from .cli import cli + +if __name__ == "__main__": + cli() diff --git a/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..8d517b6 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/__main__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 0000000..b93c79b Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/__main__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/cli.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/cli.cpython-312.pyc new file mode 100644 index 0000000..7266def Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/cli.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/ipython.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/ipython.cpython-312.pyc new file mode 100644 index 0000000..a243274 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/ipython.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/main.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/main.cpython-312.pyc new file mode 100644 index 0000000..1add5fd Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/main.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/parser.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/parser.cpython-312.pyc new file mode 100644 index 0000000..68475d8 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/parser.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/variables.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/variables.cpython-312.pyc new file mode 100644 index 0000000..52c5d0f Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/variables.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/version.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/version.cpython-312.pyc new file mode 100644 index 0000000..92f09bb Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/dotenv/__pycache__/version.cpython-312.pyc 
differ diff --git a/hackaton/lib/python3.12/site-packages/dotenv/cli.py b/hackaton/lib/python3.12/site-packages/dotenv/cli.py new file mode 100644 index 0000000..65ead46 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/dotenv/cli.py @@ -0,0 +1,199 @@ +import json +import os +import shlex +import sys +from contextlib import contextmanager +from subprocess import Popen +from typing import Any, Dict, IO, Iterator, List + +try: + import click +except ImportError: + sys.stderr.write('It seems python-dotenv is not installed with cli option. \n' + 'Run pip install "python-dotenv[cli]" to fix this.') + sys.exit(1) + +from .main import dotenv_values, set_key, unset_key +from .version import __version__ + + +def enumerate_env(): + """ + Return a path for the ${pwd}/.env file. + + If pwd does not exist, return None. + """ + try: + cwd = os.getcwd() + except FileNotFoundError: + return None + path = os.path.join(cwd, '.env') + return path + + +@click.group() +@click.option('-f', '--file', default=enumerate_env(), + type=click.Path(file_okay=True), + help="Location of the .env file, defaults to .env file in current working directory.") +@click.option('-q', '--quote', default='always', + type=click.Choice(['always', 'never', 'auto']), + help="Whether to quote or not the variable values. Default mode is always. This does not affect parsing.") +@click.option('-e', '--export', default=False, + type=click.BOOL, + help="Whether to write the dot file as an executable bash script.") +@click.version_option(version=__version__) +@click.pass_context +def cli(ctx: click.Context, file: Any, quote: Any, export: Any) -> None: + """This script is used to set, get or unset values from a .env file.""" + ctx.obj = {'QUOTE': quote, 'EXPORT': export, 'FILE': file} + + +@contextmanager +def stream_file(path: os.PathLike) -> Iterator[IO[str]]: + """ + Open a file and yield the corresponding (decoded) stream. + + Exits with error code 2 if the file cannot be opened. + """ + + try: + with open(path) as stream: + yield stream + except OSError as exc: + print(f"Error opening env file: {exc}", file=sys.stderr) + exit(2) + + +@cli.command() +@click.pass_context +@click.option('--format', default='simple', + type=click.Choice(['simple', 'json', 'shell', 'export']), + help="The format in which to display the list. 
Default format is simple, " + "which displays name=value without quotes.") +def list(ctx: click.Context, format: bool) -> None: + """Display all the stored key/value.""" + file = ctx.obj['FILE'] + + with stream_file(file) as stream: + values = dotenv_values(stream=stream) + + if format == 'json': + click.echo(json.dumps(values, indent=2, sort_keys=True)) + else: + prefix = 'export ' if format == 'export' else '' + for k in sorted(values): + v = values[k] + if v is not None: + if format in ('export', 'shell'): + v = shlex.quote(v) + click.echo(f'{prefix}{k}={v}') + + +@cli.command() +@click.pass_context +@click.argument('key', required=True) +@click.argument('value', required=True) +def set(ctx: click.Context, key: Any, value: Any) -> None: + """Store the given key/value.""" + file = ctx.obj['FILE'] + quote = ctx.obj['QUOTE'] + export = ctx.obj['EXPORT'] + success, key, value = set_key(file, key, value, quote, export) + if success: + click.echo(f'{key}={value}') + else: + exit(1) + + +@cli.command() +@click.pass_context +@click.argument('key', required=True) +def get(ctx: click.Context, key: Any) -> None: + """Retrieve the value for the given key.""" + file = ctx.obj['FILE'] + + with stream_file(file) as stream: + values = dotenv_values(stream=stream) + + stored_value = values.get(key) + if stored_value: + click.echo(stored_value) + else: + exit(1) + + +@cli.command() +@click.pass_context +@click.argument('key', required=True) +def unset(ctx: click.Context, key: Any) -> None: + """Removes the given key.""" + file = ctx.obj['FILE'] + quote = ctx.obj['QUOTE'] + success, key = unset_key(file, key, quote) + if success: + click.echo(f"Successfully removed {key}") + else: + exit(1) + + +@cli.command(context_settings={'ignore_unknown_options': True}) +@click.pass_context +@click.option( + "--override/--no-override", + default=True, + help="Override variables from the environment file with those from the .env file.", +) +@click.argument('commandline', nargs=-1, type=click.UNPROCESSED) +def run(ctx: click.Context, override: bool, commandline: List[str]) -> None: + """Run command with environment variables present.""" + file = ctx.obj['FILE'] + if not os.path.isfile(file): + raise click.BadParameter( + f'Invalid value for \'-f\' "{file}" does not exist.', + ctx=ctx + ) + dotenv_as_dict = { + k: v + for (k, v) in dotenv_values(file).items() + if v is not None and (override or k not in os.environ) + } + + if not commandline: + click.echo('No command given.') + exit(1) + ret = run_command(commandline, dotenv_as_dict) + exit(ret) + + +def run_command(command: List[str], env: Dict[str, str]) -> int: + """Run command in sub process. + + Runs the command in a sub process with the variables from `env` + added in the current environment variables. 
+
+    Parameters
+    ----------
+    command: List[str]
+        The command and its parameters
+    env: Dict
+        The additional environment variables
+
+    Returns
+    -------
+    int
+        The return code of the command
+
+    """
+    # copy the current environment variables and add the values from
+    # `env`
+    cmd_env = os.environ.copy()
+    cmd_env.update(env)
+
+    p = Popen(command,
+              universal_newlines=True,
+              bufsize=0,
+              shell=False,
+              env=cmd_env)
+    _, _ = p.communicate()
+
+    return p.returncode
diff --git a/hackaton/lib/python3.12/site-packages/dotenv/ipython.py b/hackaton/lib/python3.12/site-packages/dotenv/ipython.py
new file mode 100644
index 0000000..7df727c
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/dotenv/ipython.py
@@ -0,0 +1,39 @@
+from IPython.core.magic import Magics, line_magic, magics_class  # type: ignore
+from IPython.core.magic_arguments import (argument, magic_arguments,  # type: ignore
+                                          parse_argstring)  # type: ignore
+
+from .main import find_dotenv, load_dotenv
+
+
+@magics_class
+class IPythonDotEnv(Magics):
+
+    @magic_arguments()
+    @argument(
+        '-o', '--override', action='store_true',
+        help="Indicate to override existing variables"
+    )
+    @argument(
+        '-v', '--verbose', action='store_true',
+        help="Indicate function calls to be verbose"
+    )
+    @argument('dotenv_path', nargs='?', type=str, default='.env',
+              help='Search in increasingly higher folders for the `dotenv_path`')
+    @line_magic
+    def dotenv(self, line):
+        args = parse_argstring(self.dotenv, line)
+        # Locate the .env file
+        dotenv_path = args.dotenv_path
+        try:
+            dotenv_path = find_dotenv(dotenv_path, True, True)
+        except IOError:
+            print("cannot find .env file")
+            return
+
+        # Load the .env file
+        load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
+
+
+def load_ipython_extension(ipython):
+    """Register the %dotenv magic."""
+    ipython.register_magics(IPythonDotEnv)
diff --git a/hackaton/lib/python3.12/site-packages/dotenv/main.py b/hackaton/lib/python3.12/site-packages/dotenv/main.py
new file mode 100644
index 0000000..7bc5428
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/dotenv/main.py
@@ -0,0 +1,392 @@
+import io
+import logging
+import os
+import pathlib
+import shutil
+import sys
+import tempfile
+from collections import OrderedDict
+from contextlib import contextmanager
+from typing import (IO, Dict, Iterable, Iterator, Mapping, Optional, Tuple,
+                    Union)
+
+from .parser import Binding, parse_stream
+from .variables import parse_variables
+
+# A type alias for a string path to be used for the paths in this file.
+# These paths may flow to `open()` and `shutil.move()`; `shutil.move()`
+# only accepts string paths, not byte paths or file descriptors. See
+# https://github.com/python/typeshed/pull/6832.
+StrPath = Union[str, 'os.PathLike[str]']
+
+logger = logging.getLogger(__name__)
+
+
+def with_warn_for_invalid_lines(mappings: Iterator[Binding]) -> Iterator[Binding]:
+    for mapping in mappings:
+        if mapping.error:
+            logger.warning(
+                "Python-dotenv could not parse statement starting at line %s",
+                mapping.original.line,
+            )
+        yield mapping
+
+
+class DotEnv:
+    def __init__(
+        self,
+        dotenv_path: Optional[StrPath],
+        stream: Optional[IO[str]] = None,
+        verbose: bool = False,
+        encoding: Optional[str] = None,
+        interpolate: bool = True,
+        override: bool = True,
+    ) -> None:
+        self.dotenv_path: Optional[StrPath] = dotenv_path
+        self.stream: Optional[IO[str]] = stream
+        self._dict: Optional[Dict[str, Optional[str]]] = None
+        self.verbose: bool = verbose
+        self.encoding: Optional[str] = encoding
+        self.interpolate: bool = interpolate
+        self.override: bool = override
+
+    @contextmanager
+    def _get_stream(self) -> Iterator[IO[str]]:
+        if self.dotenv_path and os.path.isfile(self.dotenv_path):
+            with open(self.dotenv_path, encoding=self.encoding) as stream:
+                yield stream
+        elif self.stream is not None:
+            yield self.stream
+        else:
+            if self.verbose:
+                logger.info(
+                    "Python-dotenv could not find configuration file %s.",
+                    self.dotenv_path or '.env',
+                )
+            yield io.StringIO('')
+
+    def dict(self) -> Dict[str, Optional[str]]:
+        """Return dotenv as dict"""
+        if self._dict:
+            return self._dict
+
+        raw_values = self.parse()
+
+        if self.interpolate:
+            self._dict = OrderedDict(resolve_variables(raw_values, override=self.override))
+        else:
+            self._dict = OrderedDict(raw_values)
+
+        return self._dict
+
+    def parse(self) -> Iterator[Tuple[str, Optional[str]]]:
+        with self._get_stream() as stream:
+            for mapping in with_warn_for_invalid_lines(parse_stream(stream)):
+                if mapping.key is not None:
+                    yield mapping.key, mapping.value
+
+    def set_as_environment_variables(self) -> bool:
+        """
+        Load the current dotenv as system environment variables.
+        """
+        if not self.dict():
+            return False
+
+        for k, v in self.dict().items():
+            if k in os.environ and not self.override:
+                continue
+            if v is not None:
+                os.environ[k] = v
+
+        return True
+
+    def get(self, key: str) -> Optional[str]:
+        """Return the value for the given key, or None if not set.
+        """
+        data = self.dict()
+
+        if key in data:
+            return data[key]
+
+        if self.verbose:
+            logger.warning("Key %s not found in %s.", key, self.dotenv_path)
+
+        return None
+
+
+def get_key(
+    dotenv_path: StrPath,
+    key_to_get: str,
+    encoding: Optional[str] = "utf-8",
+) -> Optional[str]:
+    """
+    Get the value of a given key from the given .env.
+
+    Returns `None` if the key isn't found or doesn't have a value.
+ """ + return DotEnv(dotenv_path, verbose=True, encoding=encoding).get(key_to_get) + + +@contextmanager +def rewrite( + path: StrPath, + encoding: Optional[str], +) -> Iterator[Tuple[IO[str], IO[str]]]: + pathlib.Path(path).touch() + + with tempfile.NamedTemporaryFile(mode="w", encoding=encoding, delete=False) as dest: + error = None + try: + with open(path, encoding=encoding) as source: + yield (source, dest) + except BaseException as err: + error = err + + if error is None: + shutil.move(dest.name, path) + else: + os.unlink(dest.name) + raise error from None + + +def set_key( + dotenv_path: StrPath, + key_to_set: str, + value_to_set: str, + quote_mode: str = "always", + export: bool = False, + encoding: Optional[str] = "utf-8", +) -> Tuple[Optional[bool], str, str]: + """ + Adds or Updates a key/value to the given .env + + If the .env path given doesn't exist, fails instead of risking creating + an orphan .env somewhere in the filesystem + """ + if quote_mode not in ("always", "auto", "never"): + raise ValueError(f"Unknown quote_mode: {quote_mode}") + + quote = ( + quote_mode == "always" + or (quote_mode == "auto" and not value_to_set.isalnum()) + ) + + if quote: + value_out = "'{}'".format(value_to_set.replace("'", "\\'")) + else: + value_out = value_to_set + if export: + line_out = f'export {key_to_set}={value_out}\n' + else: + line_out = f"{key_to_set}={value_out}\n" + + with rewrite(dotenv_path, encoding=encoding) as (source, dest): + replaced = False + missing_newline = False + for mapping in with_warn_for_invalid_lines(parse_stream(source)): + if mapping.key == key_to_set: + dest.write(line_out) + replaced = True + else: + dest.write(mapping.original.string) + missing_newline = not mapping.original.string.endswith("\n") + if not replaced: + if missing_newline: + dest.write("\n") + dest.write(line_out) + + return True, key_to_set, value_to_set + + +def unset_key( + dotenv_path: StrPath, + key_to_unset: str, + quote_mode: str = "always", + encoding: Optional[str] = "utf-8", +) -> Tuple[Optional[bool], str]: + """ + Removes a given key from the given `.env` file. + + If the .env path given doesn't exist, fails. + If the given key doesn't exist in the .env, fails. 
+ """ + if not os.path.exists(dotenv_path): + logger.warning("Can't delete from %s - it doesn't exist.", dotenv_path) + return None, key_to_unset + + removed = False + with rewrite(dotenv_path, encoding=encoding) as (source, dest): + for mapping in with_warn_for_invalid_lines(parse_stream(source)): + if mapping.key == key_to_unset: + removed = True + else: + dest.write(mapping.original.string) + + if not removed: + logger.warning("Key %s not removed from %s - key doesn't exist.", key_to_unset, dotenv_path) + return None, key_to_unset + + return removed, key_to_unset + + +def resolve_variables( + values: Iterable[Tuple[str, Optional[str]]], + override: bool, +) -> Mapping[str, Optional[str]]: + new_values: Dict[str, Optional[str]] = {} + + for (name, value) in values: + if value is None: + result = None + else: + atoms = parse_variables(value) + env: Dict[str, Optional[str]] = {} + if override: + env.update(os.environ) # type: ignore + env.update(new_values) + else: + env.update(new_values) + env.update(os.environ) # type: ignore + result = "".join(atom.resolve(env) for atom in atoms) + + new_values[name] = result + + return new_values + + +def _walk_to_root(path: str) -> Iterator[str]: + """ + Yield directories starting from the given directory up to the root + """ + if not os.path.exists(path): + raise IOError('Starting path not found') + + if os.path.isfile(path): + path = os.path.dirname(path) + + last_dir = None + current_dir = os.path.abspath(path) + while last_dir != current_dir: + yield current_dir + parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir)) + last_dir, current_dir = current_dir, parent_dir + + +def find_dotenv( + filename: str = '.env', + raise_error_if_not_found: bool = False, + usecwd: bool = False, +) -> str: + """ + Search in increasingly higher folders for the given file + + Returns path to the file if found, or an empty string otherwise + """ + + def _is_interactive(): + """ Decide whether this is running in a REPL or IPython notebook """ + try: + main = __import__('__main__', None, None, fromlist=['__file__']) + except ModuleNotFoundError: + return False + return not hasattr(main, '__file__') + + if usecwd or _is_interactive() or getattr(sys, 'frozen', False): + # Should work without __file__, e.g. in REPL or IPython notebook. + path = os.getcwd() + else: + # will work for .py files + frame = sys._getframe() + current_file = __file__ + + while frame.f_code.co_filename == current_file or not os.path.exists( + frame.f_code.co_filename + ): + assert frame.f_back is not None + frame = frame.f_back + frame_filename = frame.f_code.co_filename + path = os.path.dirname(os.path.abspath(frame_filename)) + + for dirname in _walk_to_root(path): + check_path = os.path.join(dirname, filename) + if os.path.isfile(check_path): + return check_path + + if raise_error_if_not_found: + raise IOError('File not found') + + return '' + + +def load_dotenv( + dotenv_path: Optional[StrPath] = None, + stream: Optional[IO[str]] = None, + verbose: bool = False, + override: bool = False, + interpolate: bool = True, + encoding: Optional[str] = "utf-8", +) -> bool: + """Parse a .env file and then load all the variables found as environment variables. + + Parameters: + dotenv_path: Absolute or relative path to .env file. + stream: Text stream (such as `io.StringIO`) with .env content, used if + `dotenv_path` is `None`. + verbose: Whether to output a warning the .env file is missing. 
+        override: Whether to override the system environment variables with the variables
+            from the `.env` file.
+        encoding: Encoding to be used to read the file.
+    Returns:
+        Bool: True if at least one environment variable is set else False
+
+    If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the
+    .env file.
+    """
+    if dotenv_path is None and stream is None:
+        dotenv_path = find_dotenv()
+
+    dotenv = DotEnv(
+        dotenv_path=dotenv_path,
+        stream=stream,
+        verbose=verbose,
+        interpolate=interpolate,
+        override=override,
+        encoding=encoding,
+    )
+    return dotenv.set_as_environment_variables()
+
+
+def dotenv_values(
+    dotenv_path: Optional[StrPath] = None,
+    stream: Optional[IO[str]] = None,
+    verbose: bool = False,
+    interpolate: bool = True,
+    encoding: Optional[str] = "utf-8",
+) -> Dict[str, Optional[str]]:
+    """
+    Parse a .env file and return its content as a dict.
+
+    The returned dict will have `None` values for keys without values in the .env file.
+    For example, `foo=bar` results in `{"foo": "bar"}` whereas `foo` alone results in
+    `{"foo": None}`.
+
+    Parameters:
+        dotenv_path: Absolute or relative path to the .env file.
+        stream: `StringIO` object with .env content, used if `dotenv_path` is `None`.
+        verbose: Whether to output a warning if the .env file is missing.
+        encoding: Encoding to be used to read the file.
+
+    If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the
+    .env file.
+    """
+    if dotenv_path is None and stream is None:
+        dotenv_path = find_dotenv()
+
+    return DotEnv(
+        dotenv_path=dotenv_path,
+        stream=stream,
+        verbose=verbose,
+        interpolate=interpolate,
+        override=True,
+        encoding=encoding,
+    ).dict()
diff --git a/hackaton/lib/python3.12/site-packages/dotenv/parser.py b/hackaton/lib/python3.12/site-packages/dotenv/parser.py
new file mode 100644
index 0000000..735f14a
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/dotenv/parser.py
@@ -0,0 +1,175 @@
+import codecs
+import re
+from typing import (IO, Iterator, Match, NamedTuple, Optional,  # noqa:F401
+                    Pattern, Sequence, Tuple)
+
+
+def make_regex(string: str, extra_flags: int = 0) -> Pattern[str]:
+    return re.compile(string, re.UNICODE | extra_flags)
+
+
+_newline = make_regex(r"(\r\n|\n|\r)")
+_multiline_whitespace = make_regex(r"\s*", extra_flags=re.MULTILINE)
+_whitespace = make_regex(r"[^\S\r\n]*")
+_export = make_regex(r"(?:export[^\S\r\n]+)?")
+_single_quoted_key = make_regex(r"'([^']+)'")
+_unquoted_key = make_regex(r"([^=\#\s]+)")
+_equal_sign = make_regex(r"(=[^\S\r\n]*)")
+_single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'")
+_double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"')
+_unquoted_value = make_regex(r"([^\r\n]*)")
+_comment = make_regex(r"(?:[^\S\r\n]*#[^\r\n]*)?")
+_end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r|$)")
+_rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?")
+_double_quote_escapes = make_regex(r"\\[\\'\"abfnrtv]")
+_single_quote_escapes = make_regex(r"\\[\\']")
+
+
+class Original(NamedTuple):
+    string: str
+    line: int
+
+
+class Binding(NamedTuple):
+    key: Optional[str]
+    value: Optional[str]
+    original: Original
+    error: bool
+
+
+class Position:
+    def __init__(self, chars: int, line: int) -> None:
+        self.chars = chars
+        self.line = line
+
+    @classmethod
+    def start(cls) -> "Position":
+        return cls(chars=0, line=1)
+
+    def set(self, other: "Position") -> None:
+        self.chars = other.chars
+        self.line = other.line
+
+    def advance(self, string: str) -> None:
+        self.chars += len(string)
+        self.line += len(re.findall(_newline, string))
+
+
+class Error(Exception):
+    pass
+
+
+class Reader:
+    def __init__(self, stream: IO[str]) -> None:
+        self.string = stream.read()
+        self.position = Position.start()
+        self.mark = Position.start()
+
+    def has_next(self) -> bool:
+        return self.position.chars < len(self.string)
+
+    def set_mark(self) -> None:
+        self.mark.set(self.position)
+
+    def get_marked(self) -> Original:
+        return Original(
+            string=self.string[self.mark.chars:self.position.chars],
+            line=self.mark.line,
+        )
+
+    def peek(self, count: int) -> str:
+        return self.string[self.position.chars:self.position.chars + count]
+
+    def read(self, count: int) -> str:
+        result = self.string[self.position.chars:self.position.chars + count]
+        if len(result) < count:
+            raise Error("read: End of string")
+        self.position.advance(result)
+        return result
+
+    def read_regex(self, regex: Pattern[str]) -> Sequence[str]:
+        match = regex.match(self.string, self.position.chars)
+        if match is None:
+            raise Error("read_regex: Pattern not found")
+        self.position.advance(self.string[match.start():match.end()])
+        return match.groups()
+
+
+def decode_escapes(regex: Pattern[str], string: str) -> str:
+    def decode_match(match: Match[str]) -> str:
+        return codecs.decode(match.group(0), 'unicode-escape')  # type: ignore
+
+    return regex.sub(decode_match, string)
+
+
+def parse_key(reader: Reader) -> Optional[str]:
+    char = reader.peek(1)
+    if char == "#":
+        return None
+    elif char == "'":
+        (key,) = reader.read_regex(_single_quoted_key)
+    else:
+        (key,) = reader.read_regex(_unquoted_key)
+    return key
+
+
+def parse_unquoted_value(reader: Reader) -> str:
+    (part,) = reader.read_regex(_unquoted_value)
+    return re.sub(r"\s+#.*", "", part).rstrip()
+
+
+def parse_value(reader: Reader) -> str:
+    char = reader.peek(1)
+    if char == u"'":
+        (value,) = reader.read_regex(_single_quoted_value)
+        return decode_escapes(_single_quote_escapes, value)
+    elif char == u'"':
+        (value,) = reader.read_regex(_double_quoted_value)
+        return decode_escapes(_double_quote_escapes, value)
+    elif char in (u"", u"\n", u"\r"):
+        return u""
+    else:
+        return parse_unquoted_value(reader)
+
+
+def parse_binding(reader: Reader) -> Binding:
+    reader.set_mark()
+    try:
+        reader.read_regex(_multiline_whitespace)
+        if not reader.has_next():
+            return Binding(
+                key=None,
+                value=None,
+                original=reader.get_marked(),
+                error=False,
+            )
+        reader.read_regex(_export)
+        key = parse_key(reader)
+        reader.read_regex(_whitespace)
+        if reader.peek(1) == "=":
+            reader.read_regex(_equal_sign)
+            value: Optional[str] = parse_value(reader)
+        else:
+            value = None
+        reader.read_regex(_comment)
+        reader.read_regex(_end_of_line)
+        return Binding(
+            key=key,
+            value=value,
+            original=reader.get_marked(),
+            error=False,
+        )
+    except Error:
+        reader.read_regex(_rest_of_line)
+        return Binding(
+            key=None,
+            value=None,
+            original=reader.get_marked(),
+            error=True,
+        )
+
+
+def parse_stream(stream: IO[str]) -> Iterator[Binding]:
+    reader = Reader(stream)
+    while reader.has_next():
+        yield parse_binding(reader)
diff --git a/hackaton/lib/python3.12/site-packages/dotenv/py.typed b/hackaton/lib/python3.12/site-packages/dotenv/py.typed
new file mode 100644
index 0000000..7632ecf
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/dotenv/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561
diff --git a/hackaton/lib/python3.12/site-packages/dotenv/variables.py b/hackaton/lib/python3.12/site-packages/dotenv/variables.py
new file mode 100644
index 0000000..667f2f2
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/dotenv/variables.py
@@ -0,0 +1,86 @@
+import re
+from abc import ABCMeta, abstractmethod
+from typing import Iterator, Mapping, Optional, Pattern
+
+_posix_variable: Pattern[str] = re.compile(
+    r"""
+    \$\{
+        (?P<name>[^\}:]*)
+        (?::-
+            (?P<default>[^\}]*)
+        )?
+    \}
+    """,
+    re.VERBOSE,
+)
+
+
+class Atom(metaclass=ABCMeta):
+    def __ne__(self, other: object) -> bool:
+        result = self.__eq__(other)
+        if result is NotImplemented:
+            return NotImplemented
+        return not result
+
+    @abstractmethod
+    def resolve(self, env: Mapping[str, Optional[str]]) -> str: ...
+
+
+class Literal(Atom):
+    def __init__(self, value: str) -> None:
+        self.value = value
+
+    def __repr__(self) -> str:
+        return f"Literal(value={self.value})"
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, self.__class__):
+            return NotImplemented
+        return self.value == other.value
+
+    def __hash__(self) -> int:
+        return hash((self.__class__, self.value))
+
+    def resolve(self, env: Mapping[str, Optional[str]]) -> str:
+        return self.value
+
+
+class Variable(Atom):
+    def __init__(self, name: str, default: Optional[str]) -> None:
+        self.name = name
+        self.default = default
+
+    def __repr__(self) -> str:
+        return f"Variable(name={self.name}, default={self.default})"
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, self.__class__):
+            return NotImplemented
+        return (self.name, self.default) == (other.name, other.default)
+
+    def __hash__(self) -> int:
+        return hash((self.__class__, self.name, self.default))
+
+    def resolve(self, env: Mapping[str, Optional[str]]) -> str:
+        default = self.default if self.default is not None else ""
+        result = env.get(self.name, default)
+        return result if result is not None else ""
+
+
+def parse_variables(value: str) -> Iterator[Atom]:
+    cursor = 0
+
+    for match in _posix_variable.finditer(value):
+        (start, end) = match.span()
+        name = match["name"]
+        default = match["default"]
+
+        if start > cursor:
+            yield Literal(value=value[cursor:start])
+
+        yield Variable(name=name, default=default)
+        cursor = end
+
+    length = len(value)
+    if cursor < length:
+        yield Literal(value=value[cursor:length])
diff --git a/hackaton/lib/python3.12/site-packages/dotenv/version.py b/hackaton/lib/python3.12/site-packages/dotenv/version.py
new file mode 100644
index 0000000..5c4105c
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/dotenv/version.py
@@ -0,0 +1 @@
+__version__ = "1.0.1"
diff --git a/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/INSTALLER b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/LICENSE b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/LICENSE
new file mode 100644
index 0000000..79a03ca
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/LICENSE
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2015 MagicStack Inc. http://magic.io
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/METADATA b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/METADATA
new file mode 100644
index 0000000..8e35e17
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/METADATA
@@ -0,0 +1,133 @@
+Metadata-Version: 2.1
+Name: httptools
+Version: 0.6.4
+Summary: A collection of framework independent HTTP protocol utils.
+Home-page: https://github.com/MagicStack/httptools
+Author: Yury Selivanov
+Author-email: yury@magic.io
+License: MIT
+Platform: macOS
+Platform: POSIX
+Platform: Windows
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Environment :: Web Environment
+Classifier: Development Status :: 5 - Production/Stable
+Requires-Python: >=3.8.0
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Provides-Extra: test
+Requires-Dist: Cython >=0.29.24 ; extra == 'test'
+
+![Tests](https://github.com/MagicStack/httptools/workflows/Tests/badge.svg)
+
+httptools is a Python binding for the nodejs HTTP parser.
+
+The package is available on PyPI: `pip install httptools`.
+
+
+# APIs
+
+httptools contains two classes `httptools.HttpRequestParser`,
+`httptools.HttpResponseParser` (fulfilled through
+[llhttp](https://github.com/nodejs/llhttp)) and a function for
+parsing URLs `httptools.parse_url` (through
+[http-parse](https://github.com/nodejs/http-parser) for now).
+See unittests for examples.
+
+
+```python
+
+class HttpRequestParser:
+
+    def __init__(self, protocol):
+        """HttpRequestParser
+
+        protocol -- a Python object with the following methods
+        (all optional):
+
+          - on_message_begin()
+          - on_url(url: bytes)
+          - on_header(name: bytes, value: bytes)
+          - on_headers_complete()
+          - on_body(body: bytes)
+          - on_message_complete()
+          - on_chunk_header()
+          - on_chunk_complete()
+          - on_status(status: bytes)
+        """
+
+    def get_http_version(self) -> str:
+        """Return an HTTP protocol version."""
+
+    def should_keep_alive(self) -> bool:
+        """Return ``True`` if keep-alive mode is preferred."""
+
+    def should_upgrade(self) -> bool:
+        """Return ``True`` if the parsed request is a valid Upgrade request.
+        The method exposes a flag set just before on_headers_complete.
+        Calling this method earlier will only yield `False`.
+ """ + + def feed_data(self, data: bytes): + """Feed data to the parser. + + Will eventually trigger callbacks on the ``protocol`` + object. + + On HTTP upgrade, this method will raise an + ``HttpParserUpgrade`` exception, with its sole argument + set to the offset of the non-HTTP data in ``data``. + """ + + def get_method(self) -> bytes: + """Return HTTP request method (GET, HEAD, etc)""" + + +class HttpResponseParser: + + """Has all methods except ``get_method()`` that + HttpRequestParser has.""" + + def get_status_code(self) -> int: + """Return the status code of the HTTP response""" + + +def parse_url(url: bytes): + """Parse URL strings into a structured Python object. + + Returns an instance of ``httptools.URL`` class with the + following attributes: + + - schema: bytes + - host: bytes + - port: int + - path: bytes + - query: bytes + - fragment: bytes + - userinfo: bytes + """ +``` + + +# Development + +1. Clone this repository with + `git clone --recursive git@github.com:MagicStack/httptools.git` + +2. Create a virtual environment with Python 3: + `python3 -m venv envname` + +3. Activate the environment with `source envname/bin/activate` + +4. Install development requirements with `pip install -e .[test]` + +5. Run `make` and `make test`. + + +# License + +MIT. diff --git a/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/RECORD b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/RECORD new file mode 100644 index 0000000..facede9 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/RECORD @@ -0,0 +1,21 @@ +httptools-0.6.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +httptools-0.6.4.dist-info/LICENSE,sha256=9Fc-fLdnZ0X7W402-lSKqT45HPtoct2s1lEwxF6mqS0,1093 +httptools-0.6.4.dist-info/METADATA,sha256=TeeHZl3JvfneY2qmDgq6pk1saKikURf4Dz41IRt19Hg,3585 +httptools-0.6.4.dist-info/RECORD,, +httptools-0.6.4.dist-info/WHEEL,sha256=T94HOVPNbYE6jyG6QmiIglWJ01nwJvHIWFgubY8hhjc,109 +httptools-0.6.4.dist-info/top_level.txt,sha256=APjJKTbZcj0OQ4fdgf2eTCk82nK1n2BFXOD7ky41MPY,10 +httptools/__init__.py,sha256=plt3MIbueJdco9Dy7zoH3ksLNeyirqWagat5rwRmAjo,147 +httptools/__pycache__/__init__.cpython-312.pyc,, +httptools/__pycache__/_version.cpython-312.pyc,, +httptools/_version.py,sha256=ASqOB8fLS7jwZsM551Lc49WxYPyjteqnz1iDWmka-KA,575 +httptools/parser/__init__.py,sha256=fWyconPEHZlJojzRwmBKSn4C85OGXmKEwiEcdjHqXO8,166 +httptools/parser/__pycache__/__init__.cpython-312.pyc,, +httptools/parser/__pycache__/errors.cpython-312.pyc,, +httptools/parser/cparser.pxd,sha256=4qBxnma83Vz86Z9sOZRxjqYj20A-aLSWVGXZgTVLJqE,4977 +httptools/parser/errors.py,sha256=ZVrtN1smPIb_opQ2Ud3uCbGlNLMlECYM2-6S7r5LnHs,566 +httptools/parser/parser.cpython-312-darwin.so,sha256=lUQCzfmMRKxNIhaJWzzzwtz6tWz7pEsG5v6aUafoieI,198832 +httptools/parser/parser.pyx,sha256=x0BUY9EzHNKCDaw-U8bkZ1MaKGtrOQ8iVCm1IuOtEQI,15140 +httptools/parser/python.pxd,sha256=zWCdGZh34fyQNt3BUHIUjPqY8a5sodRUkfdABxqYHgQ,138 +httptools/parser/url_cparser.pxd,sha256=X5dDI8A7T0l5HL_Czt0mTs0l_d2lXnUDHx1TN8LeiCM,779 +httptools/parser/url_parser.cpython-312-darwin.so,sha256=1Ux94Y76PwJNxLY9zY0ni8XTCh81sG2z5UAPgZ_mDNQ,121952 +httptools/parser/url_parser.pyx,sha256=ZJVUZqrIDdhzVodA7tTtoFb570av-SczIyh2oAZXKzM,3758 diff --git a/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/WHEEL b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/WHEEL new file mode 100644 index 0000000..edd13a0 --- /dev/null +++ 
b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.2.0) +Root-Is-Purelib: false +Tag: cp312-cp312-macosx_11_0_arm64 + diff --git a/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/top_level.txt b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/top_level.txt new file mode 100644 index 0000000..bef3b40 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/httptools-0.6.4.dist-info/top_level.txt @@ -0,0 +1 @@ +httptools diff --git a/hackaton/lib/python3.12/site-packages/httptools/__init__.py b/hackaton/lib/python3.12/site-packages/httptools/__init__.py new file mode 100644 index 0000000..972053e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/httptools/__init__.py @@ -0,0 +1,6 @@ +from . import parser +from .parser import * # NOQA + +from ._version import __version__ # NOQA + +__all__ = parser.__all__ + ('__version__',) # NOQA diff --git a/hackaton/lib/python3.12/site-packages/httptools/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/httptools/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..5869ced Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/httptools/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/httptools/__pycache__/_version.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/httptools/__pycache__/_version.cpython-312.pyc new file mode 100644 index 0000000..3615537 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/httptools/__pycache__/_version.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/httptools/_version.py b/hackaton/lib/python3.12/site-packages/httptools/_version.py new file mode 100644 index 0000000..d49ea6f --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/httptools/_version.py @@ -0,0 +1,13 @@ +# This file MUST NOT contain anything but the __version__ assignment. +# +# When making a release, change the value of __version__ +# to an appropriate value, and open a pull request against +# the correct branch (master if making a new feature release). +# The commit message MUST contain a properly formatted release +# log, and the commit must be signed. +# +# The release automation will: build and test the packages for the +# supported platforms, publish the packages on PyPI, merge the PR +# to the target branch, create a Git tag pointing to the commit. 
+ +__version__ = '0.6.4' diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/__init__.py b/hackaton/lib/python3.12/site-packages/httptools/parser/__init__.py new file mode 100644 index 0000000..ba371f5 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/httptools/parser/__init__.py @@ -0,0 +1,5 @@ +from .parser import * # NoQA +from .errors import * # NoQA +from .url_parser import * # NoQA + +__all__ = parser.__all__ + errors.__all__ + url_parser.__all__ # NoQA diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/httptools/parser/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..fb6b093 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/httptools/parser/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/__pycache__/errors.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/httptools/parser/__pycache__/errors.cpython-312.pyc new file mode 100644 index 0000000..99d9ff0 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/httptools/parser/__pycache__/errors.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/cparser.pxd b/hackaton/lib/python3.12/site-packages/httptools/parser/cparser.pxd new file mode 100644 index 0000000..3281864 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/httptools/parser/cparser.pxd @@ -0,0 +1,167 @@ +from libc.stdint cimport int32_t, uint8_t, uint16_t, uint64_t + + +cdef extern from "llhttp.h": + struct llhttp__internal_s: + int32_t _index + void *_span_pos0 + void *_span_cb0 + int32_t error + const char *reason + const char *error_pos + void *data + void *_current + uint64_t content_length + uint8_t type + uint8_t method + uint8_t http_major + uint8_t http_minor + uint8_t header_state + uint16_t flags + uint8_t upgrade + uint16_t status_code + uint8_t finish + void *settings + ctypedef llhttp__internal_s llhttp__internal_t + ctypedef llhttp__internal_t llhttp_t + + ctypedef int (*llhttp_data_cb) (llhttp_t*, + const char *at, + size_t length) except -1 + + ctypedef int (*llhttp_cb) (llhttp_t*) except -1 + + struct llhttp_settings_s: + llhttp_cb on_message_begin + llhttp_data_cb on_url + llhttp_data_cb on_status + llhttp_data_cb on_header_field + llhttp_data_cb on_header_value + llhttp_cb on_headers_complete + llhttp_data_cb on_body + llhttp_cb on_message_complete + llhttp_cb on_chunk_header + llhttp_cb on_chunk_complete + ctypedef llhttp_settings_s llhttp_settings_t + + enum llhttp_type: + HTTP_BOTH, + HTTP_REQUEST, + HTTP_RESPONSE + ctypedef llhttp_type llhttp_type_t + + enum llhttp_errno: + HPE_OK, + HPE_INTERNAL, + HPE_STRICT, + HPE_LF_EXPECTED, + HPE_UNEXPECTED_CONTENT_LENGTH, + HPE_CLOSED_CONNECTION, + HPE_INVALID_METHOD, + HPE_INVALID_URL, + HPE_INVALID_CONSTANT, + HPE_INVALID_VERSION, + HPE_INVALID_HEADER_TOKEN, + HPE_INVALID_CONTENT_LENGTH, + HPE_INVALID_CHUNK_SIZE, + HPE_INVALID_STATUS, + HPE_INVALID_EOF_STATE, + HPE_INVALID_TRANSFER_ENCODING, + HPE_CB_MESSAGE_BEGIN, + HPE_CB_HEADERS_COMPLETE, + HPE_CB_MESSAGE_COMPLETE, + HPE_CB_CHUNK_HEADER, + HPE_CB_CHUNK_COMPLETE, + HPE_PAUSED, + HPE_PAUSED_UPGRADE, + HPE_USER + ctypedef llhttp_errno llhttp_errno_t + + enum llhttp_flags: + F_CONNECTION_KEEP_ALIVE, + F_CONNECTION_CLOSE, + F_CONNECTION_UPGRADE, + F_CHUNKED, + F_UPGRADE, + F_CONTENT_LENGTH, + F_SKIPBODY, + F_TRAILING, + F_LENIENT, + F_TRANSFER_ENCODING + ctypedef llhttp_flags 
llhttp_flags_t + + enum llhttp_method: + HTTP_DELETE, + HTTP_GET, + HTTP_HEAD, + HTTP_POST, + HTTP_PUT, + HTTP_CONNECT, + HTTP_OPTIONS, + HTTP_TRACE, + HTTP_COPY, + HTTP_LOCK, + HTTP_MKCOL, + HTTP_MOVE, + HTTP_PROPFIND, + HTTP_PROPPATCH, + HTTP_SEARCH, + HTTP_UNLOCK, + HTTP_BIND, + HTTP_REBIND, + HTTP_UNBIND, + HTTP_ACL, + HTTP_REPORT, + HTTP_MKACTIVITY, + HTTP_CHECKOUT, + HTTP_MERGE, + HTTP_MSEARCH, + HTTP_NOTIFY, + HTTP_SUBSCRIBE, + HTTP_UNSUBSCRIBE, + HTTP_PATCH, + HTTP_PURGE, + HTTP_MKCALENDAR, + HTTP_LINK, + HTTP_UNLINK, + HTTP_SOURCE, + HTTP_PRI, + HTTP_DESCRIBE, + HTTP_ANNOUNCE, + HTTP_SETUP, + HTTP_PLAY, + HTTP_PAUSE, + HTTP_TEARDOWN, + HTTP_GET_PARAMETER, + HTTP_SET_PARAMETER, + HTTP_REDIRECT, + HTTP_RECORD, + HTTP_FLUSH + ctypedef llhttp_method llhttp_method_t + + void llhttp_init(llhttp_t* parser, llhttp_type_t type, const llhttp_settings_t* settings) + + void llhttp_settings_init(llhttp_settings_t* settings) + + llhttp_errno_t llhttp_execute(llhttp_t* parser, const char* data, size_t len) + + void llhttp_resume_after_upgrade(llhttp_t* parser) + + int llhttp_should_keep_alive(const llhttp_t* parser) + + const char* llhttp_get_error_pos(const llhttp_t* parser) + const char* llhttp_get_error_reason(const llhttp_t* parser) + const char* llhttp_method_name(llhttp_method_t method) + + void llhttp_set_error_reason(llhttp_t* parser, const char* reason); + + void llhttp_set_lenient_headers(llhttp_t* parser, bint enabled); + void llhttp_set_lenient_chunked_length(llhttp_t* parser, bint enabled); + void llhttp_set_lenient_keep_alive(llhttp_t* parser, bint enabled); + void llhttp_set_lenient_transfer_encoding(llhttp_t* parser, bint enabled); + void llhttp_set_lenient_version(llhttp_t* parser, bint enabled); + void llhttp_set_lenient_data_after_close(llhttp_t* parser, bint enabled); + void llhttp_set_lenient_optional_lf_after_cr(llhttp_t* parser, bint enabled); + void llhttp_set_lenient_optional_cr_before_lf(llhttp_t* parser, bint enabled); + void llhttp_set_lenient_optional_crlf_after_chunk(llhttp_t* parser, bint enabled); + void llhttp_set_lenient_spaces_after_chunk_size(llhttp_t* parser, bint enabled); diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/errors.py b/hackaton/lib/python3.12/site-packages/httptools/parser/errors.py new file mode 100644 index 0000000..bc24c46 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/httptools/parser/errors.py @@ -0,0 +1,30 @@ +__all__ = ('HttpParserError', + 'HttpParserCallbackError', + 'HttpParserInvalidStatusError', + 'HttpParserInvalidMethodError', + 'HttpParserInvalidURLError', + 'HttpParserUpgrade') + + +class HttpParserError(Exception): + pass + + +class HttpParserCallbackError(HttpParserError): + pass + + +class HttpParserInvalidStatusError(HttpParserError): + pass + + +class HttpParserInvalidMethodError(HttpParserError): + pass + + +class HttpParserInvalidURLError(HttpParserError): + pass + + +class HttpParserUpgrade(Exception): + pass diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/parser.cpython-312-darwin.so b/hackaton/lib/python3.12/site-packages/httptools/parser/parser.cpython-312-darwin.so new file mode 100755 index 0000000..27fe2a9 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/httptools/parser/parser.cpython-312-darwin.so differ diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/parser.pyx b/hackaton/lib/python3.12/site-packages/httptools/parser/parser.pyx new file mode 100644 index 0000000..2fa5026 --- /dev/null +++ 
b/hackaton/lib/python3.12/site-packages/httptools/parser/parser.pyx @@ -0,0 +1,436 @@ +#cython: language_level=3 + +from __future__ import print_function +from typing import Optional + +from cpython.mem cimport PyMem_Malloc, PyMem_Free +from cpython cimport PyObject_GetBuffer, PyBuffer_Release, PyBUF_SIMPLE, \ + Py_buffer, PyBytes_AsString + +from .python cimport PyMemoryView_Check, PyMemoryView_GET_BUFFER + + +from .errors import (HttpParserError, + HttpParserCallbackError, + HttpParserInvalidStatusError, + HttpParserInvalidMethodError, + HttpParserInvalidURLError, + HttpParserUpgrade) + +cimport cython +from . cimport cparser + + +__all__ = ('HttpRequestParser', 'HttpResponseParser') + + +@cython.internal +cdef class HttpParser: + + cdef: + cparser.llhttp_t* _cparser + cparser.llhttp_settings_t* _csettings + + bytes _current_header_name + bytes _current_header_value + + _proto_on_url, _proto_on_status, _proto_on_body, \ + _proto_on_header, _proto_on_headers_complete, \ + _proto_on_message_complete, _proto_on_chunk_header, \ + _proto_on_chunk_complete, _proto_on_message_begin + + object _last_error + + Py_buffer py_buf + + def __cinit__(self): + self._cparser = <cparser.llhttp_t*> \ + PyMem_Malloc(sizeof(cparser.llhttp_t)) + if self._cparser is NULL: + raise MemoryError() + + self._csettings = <cparser.llhttp_settings_t*> \ + PyMem_Malloc(sizeof(cparser.llhttp_settings_t)) + if self._csettings is NULL: + raise MemoryError() + + def __dealloc__(self): + PyMem_Free(self._cparser) + PyMem_Free(self._csettings) + + cdef _init(self, protocol, cparser.llhttp_type_t mode): + cparser.llhttp_settings_init(self._csettings) + + cparser.llhttp_init(self._cparser, mode, self._csettings) + self._cparser.data = <void*>self + + self._current_header_name = None + self._current_header_value = None + + self._proto_on_header = getattr(protocol, 'on_header', None) + if self._proto_on_header is not None: + self._csettings.on_header_field = cb_on_header_field + self._csettings.on_header_value = cb_on_header_value + self._proto_on_headers_complete = getattr( + protocol, 'on_headers_complete', None) + self._csettings.on_headers_complete = cb_on_headers_complete + + self._proto_on_body = getattr(protocol, 'on_body', None) + if self._proto_on_body is not None: + self._csettings.on_body = cb_on_body + + self._proto_on_message_begin = getattr( + protocol, 'on_message_begin', None) + if self._proto_on_message_begin is not None: + self._csettings.on_message_begin = cb_on_message_begin + + self._proto_on_message_complete = getattr( + protocol, 'on_message_complete', None) + if self._proto_on_message_complete is not None: + self._csettings.on_message_complete = cb_on_message_complete + + self._proto_on_chunk_header = getattr( + protocol, 'on_chunk_header', None) + self._csettings.on_chunk_header = cb_on_chunk_header + + self._proto_on_chunk_complete = getattr( + protocol, 'on_chunk_complete', None) + self._csettings.on_chunk_complete = cb_on_chunk_complete + + self._last_error = None + + cdef _maybe_call_on_header(self): + if self._current_header_value is not None: + current_header_name = self._current_header_name + current_header_value = self._current_header_value + + self._current_header_name = self._current_header_value = None + + if self._proto_on_header is not None: + self._proto_on_header(current_header_name, + current_header_value) + + cdef _on_header_field(self, bytes field): + self._maybe_call_on_header() + if self._current_header_name is None: + self._current_header_name = field + else: + self._current_header_name += field + + cdef _on_header_value(self, bytes val):
+ if self._current_header_value is None: + self._current_header_value = val + else: + # This is unlikely, as mostly HTTP headers are one-line + self._current_header_value += val + + cdef _on_headers_complete(self): + self._maybe_call_on_header() + + if self._proto_on_headers_complete is not None: + self._proto_on_headers_complete() + + cdef _on_chunk_header(self): + if (self._current_header_value is not None or + self._current_header_name is not None): + raise HttpParserError('invalid headers state') + + if self._proto_on_chunk_header is not None: + self._proto_on_chunk_header() + + cdef _on_chunk_complete(self): + self._maybe_call_on_header() + + if self._proto_on_chunk_complete is not None: + self._proto_on_chunk_complete() + + ### Public API ### + + def set_dangerous_leniencies( + self, + lenient_headers: Optional[bool] = None, + lenient_chunked_length: Optional[bool] = None, + lenient_keep_alive: Optional[bool] = None, + lenient_transfer_encoding: Optional[bool] = None, + lenient_version: Optional[bool] = None, + lenient_data_after_close: Optional[bool] = None, + lenient_optional_lf_after_cr: Optional[bool] = None, + lenient_optional_cr_before_lf: Optional[bool] = None, + lenient_optional_crlf_after_chunk: Optional[bool] = None, + lenient_spaces_after_chunk_size: Optional[bool] = None, + ): + cdef cparser.llhttp_t* parser = self._cparser + if lenient_headers is not None: + cparser.llhttp_set_lenient_headers( + parser, lenient_headers) + if lenient_chunked_length is not None: + cparser.llhttp_set_lenient_chunked_length( + parser, lenient_chunked_length) + if lenient_keep_alive is not None: + cparser.llhttp_set_lenient_keep_alive( + parser, lenient_keep_alive) + if lenient_transfer_encoding is not None: + cparser.llhttp_set_lenient_transfer_encoding( + parser, lenient_transfer_encoding) + if lenient_version is not None: + cparser.llhttp_set_lenient_version( + parser, lenient_version) + if lenient_data_after_close is not None: + cparser.llhttp_set_lenient_data_after_close( + parser, lenient_data_after_close) + if lenient_optional_lf_after_cr is not None: + cparser.llhttp_set_lenient_optional_lf_after_cr( + parser, lenient_optional_lf_after_cr) + if lenient_optional_cr_before_lf is not None: + cparser.llhttp_set_lenient_optional_cr_before_lf( + parser, lenient_optional_cr_before_lf) + if lenient_optional_crlf_after_chunk is not None: + cparser.llhttp_set_lenient_optional_crlf_after_chunk( + parser, lenient_optional_crlf_after_chunk) + if lenient_spaces_after_chunk_size is not None: + cparser.llhttp_set_lenient_spaces_after_chunk_size( + parser, lenient_spaces_after_chunk_size) + + def get_http_version(self): + cdef cparser.llhttp_t* parser = self._cparser + return '{}.{}'.format(parser.http_major, parser.http_minor) + + def should_keep_alive(self): + return bool(cparser.llhttp_should_keep_alive(self._cparser)) + + def should_upgrade(self): + cdef cparser.llhttp_t* parser = self._cparser + return bool(parser.upgrade) + + def feed_data(self, data): + cdef: + size_t data_len + cparser.llhttp_errno_t err + Py_buffer *buf + bint owning_buf = False + const char* err_pos + + if PyMemoryView_Check(data): + buf = PyMemoryView_GET_BUFFER(data) + data_len = <size_t>buf.len + err = cparser.llhttp_execute( + self._cparser, + <char*>buf.buf, + data_len) + + else: + buf = &self.py_buf + PyObject_GetBuffer(data, buf, PyBUF_SIMPLE) + owning_buf = True + data_len = <size_t>buf.len + + err = cparser.llhttp_execute( + self._cparser, + <char*>buf.buf, + data_len) + + try: + if self._cparser.upgrade == 1 and err ==
cparser.HPE_PAUSED_UPGRADE: + err_pos = cparser.llhttp_get_error_pos(self._cparser) + + # Immediately free the parser from "error" state, simulating + # http-parser behavior here because 1) we never had the API to + # allow users manually "resume after upgrade", and 2) the use + # case for resuming parsing is very rare. + cparser.llhttp_resume_after_upgrade(self._cparser) + + # The err_pos here is specific for the input buf. So if we ever + # switch to the llhttp behavior (re-raise HttpParserUpgrade for + # successive calls to feed_data() until resume_after_upgrade is + # called), we have to store the result and keep our own state. + raise HttpParserUpgrade(err_pos - <char*>buf.buf) + finally: + if owning_buf: + PyBuffer_Release(buf) + + if err != cparser.HPE_OK: + ex = parser_error_from_errno( + self._cparser, + <cparser.llhttp_errno_t> self._cparser.error) + if isinstance(ex, HttpParserCallbackError): + if self._last_error is not None: + ex.__context__ = self._last_error + self._last_error = None + raise ex + + +cdef class HttpRequestParser(HttpParser): + + def __init__(self, protocol): + self._init(protocol, cparser.HTTP_REQUEST) + + self._proto_on_url = getattr(protocol, 'on_url', None) + if self._proto_on_url is not None: + self._csettings.on_url = cb_on_url + + def get_method(self): + cdef cparser.llhttp_t* parser = self._cparser + return cparser.llhttp_method_name(<cparser.llhttp_method_t> parser.method) + + +cdef class HttpResponseParser(HttpParser): + + def __init__(self, protocol): + self._init(protocol, cparser.HTTP_RESPONSE) + + self._proto_on_status = getattr(protocol, 'on_status', None) + if self._proto_on_status is not None: + self._csettings.on_status = cb_on_status + + def get_status_code(self): + cdef cparser.llhttp_t* parser = self._cparser + return parser.status_code + + +cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: + cdef HttpParser pyparser = <HttpParser>parser.data + try: + pyparser._proto_on_message_begin() + except BaseException as ex: + pyparser._last_error = ex + return -1 + else: + return 0 + + +cdef int cb_on_url(cparser.llhttp_t* parser, + const char *at, size_t length) except -1: + cdef HttpParser pyparser = <HttpParser>parser.data + try: + pyparser._proto_on_url(at[:length]) + except BaseException as ex: + cparser.llhttp_set_error_reason(parser, "`on_url` callback error") + pyparser._last_error = ex + return cparser.HPE_USER + else: + return 0 + + +cdef int cb_on_status(cparser.llhttp_t* parser, + const char *at, size_t length) except -1: + cdef HttpParser pyparser = <HttpParser>parser.data + try: + pyparser._proto_on_status(at[:length]) + except BaseException as ex: + cparser.llhttp_set_error_reason(parser, "`on_status` callback error") + pyparser._last_error = ex + return cparser.HPE_USER + else: + return 0 + + +cdef int cb_on_header_field(cparser.llhttp_t* parser, + const char *at, size_t length) except -1: + cdef HttpParser pyparser = <HttpParser>parser.data + try: + pyparser._on_header_field(at[:length]) + except BaseException as ex: + cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") + pyparser._last_error = ex + return cparser.HPE_USER + else: + return 0 + + +cdef int cb_on_header_value(cparser.llhttp_t* parser, + const char *at, size_t length) except -1: + cdef HttpParser pyparser = <HttpParser>parser.data + try: + pyparser._on_header_value(at[:length]) + except BaseException as ex: + cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") + pyparser._last_error = ex + return cparser.HPE_USER + else: + return 0 + + +cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: + cdef
HttpParser pyparser = <HttpParser>parser.data + try: + pyparser._on_headers_complete() + except BaseException as ex: + pyparser._last_error = ex + return -1 + else: + if pyparser._cparser.upgrade: + return 1 + else: + return 0 + + +cdef int cb_on_body(cparser.llhttp_t* parser, + const char *at, size_t length) except -1: + cdef HttpParser pyparser = <HttpParser>parser.data + try: + pyparser._proto_on_body(at[:length]) + except BaseException as ex: + cparser.llhttp_set_error_reason(parser, "`on_body` callback error") + pyparser._last_error = ex + return cparser.HPE_USER + else: + return 0 + + +cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: + cdef HttpParser pyparser = <HttpParser>parser.data + try: + pyparser._proto_on_message_complete() + except BaseException as ex: + pyparser._last_error = ex + return -1 + else: + return 0 + + +cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: + cdef HttpParser pyparser = <HttpParser>parser.data + try: + pyparser._on_chunk_header() + except BaseException as ex: + pyparser._last_error = ex + return -1 + else: + return 0 + + +cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: + cdef HttpParser pyparser = <HttpParser>parser.data + try: + pyparser._on_chunk_complete() + except BaseException as ex: + pyparser._last_error = ex + return -1 + else: + return 0 + + +cdef parser_error_from_errno(cparser.llhttp_t* parser, cparser.llhttp_errno_t errno): + cdef bytes reason = cparser.llhttp_get_error_reason(parser) + + if errno in (cparser.HPE_CB_MESSAGE_BEGIN, + cparser.HPE_CB_HEADERS_COMPLETE, + cparser.HPE_CB_MESSAGE_COMPLETE, + cparser.HPE_CB_CHUNK_HEADER, + cparser.HPE_CB_CHUNK_COMPLETE, + cparser.HPE_USER): + cls = HttpParserCallbackError + + elif errno == cparser.HPE_INVALID_STATUS: + cls = HttpParserInvalidStatusError + + elif errno == cparser.HPE_INVALID_METHOD: + cls = HttpParserInvalidMethodError + + elif errno == cparser.HPE_INVALID_URL: + cls = HttpParserInvalidURLError + + else: + cls = HttpParserError + + return cls(reason.decode('latin-1')) diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/python.pxd b/hackaton/lib/python3.12/site-packages/httptools/parser/python.pxd new file mode 100644 index 0000000..8e95925 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/httptools/parser/python.pxd @@ -0,0 +1,6 @@ +cimport cpython + + +cdef extern from "Python.h": + cpython.Py_buffer* PyMemoryView_GET_BUFFER(object) + bint PyMemoryView_Check(object) diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/url_cparser.pxd b/hackaton/lib/python3.12/site-packages/httptools/parser/url_cparser.pxd new file mode 100644 index 0000000..ab9265a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/httptools/parser/url_cparser.pxd @@ -0,0 +1,31 @@ +from libc.stdint cimport uint16_t + + +cdef extern from "http_parser.h": + # URL Parser + + enum http_parser_url_fields: + UF_SCHEMA = 0, + UF_HOST = 1, + UF_PORT = 2, + UF_PATH = 3, + UF_QUERY = 4, + UF_FRAGMENT = 5, + UF_USERINFO = 6, + UF_MAX = 7 + + struct http_parser_url_field_data: + uint16_t off + uint16_t len + + struct http_parser_url: + uint16_t field_set + uint16_t port + http_parser_url_field_data[UF_MAX] field_data + + void http_parser_url_init(http_parser_url *u) + + int http_parser_parse_url(const char *buf, + size_t buflen, + int is_connect, + http_parser_url *u) diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/url_parser.cpython-312-darwin.so b/hackaton/lib/python3.12/site-packages/httptools/parser/url_parser.cpython-312-darwin.so new file mode 100755 index
0000000..15a047a Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/httptools/parser/url_parser.cpython-312-darwin.so differ diff --git a/hackaton/lib/python3.12/site-packages/httptools/parser/url_parser.pyx b/hackaton/lib/python3.12/site-packages/httptools/parser/url_parser.pyx new file mode 100644 index 0000000..49908f3 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/httptools/parser/url_parser.pyx @@ -0,0 +1,108 @@ +#cython: language_level=3 + +from __future__ import print_function +from cpython.mem cimport PyMem_Malloc, PyMem_Free +from cpython cimport PyObject_GetBuffer, PyBuffer_Release, PyBUF_SIMPLE, \ + Py_buffer + +from .errors import HttpParserInvalidURLError + +cimport cython +from . cimport url_cparser as uparser + +__all__ = ('parse_url',) + +@cython.freelist(250) +cdef class URL: + cdef readonly bytes schema + cdef readonly bytes host + cdef readonly object port + cdef readonly bytes path + cdef readonly bytes query + cdef readonly bytes fragment + cdef readonly bytes userinfo + + def __cinit__(self, bytes schema, bytes host, object port, bytes path, + bytes query, bytes fragment, bytes userinfo): + + self.schema = schema + self.host = host + self.port = port + self.path = path + self.query = query + self.fragment = fragment + self.userinfo = userinfo + + def __repr__(self): + return ('<URL schema: {!r}, host: {!r}, port: {!r}, path: {!r}, ' + 'query: {!r}, fragment: {!r}, userinfo: {!r}>' + .format(self.schema, self.host, self.port, self.path, + self.query, self.fragment, self.userinfo)) + + +def parse_url(url): + cdef: + Py_buffer py_buf + char* buf_data + uparser.http_parser_url* parsed + int res + bytes schema = None + bytes host = None + object port = None + bytes path = None + bytes query = None + bytes fragment = None + bytes userinfo = None + object result = None + int off + int ln + + parsed = <uparser.http_parser_url*> \ + PyMem_Malloc(sizeof(uparser.http_parser_url)) + uparser.http_parser_url_init(parsed) + + PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE) + try: + buf_data = <char*>py_buf.buf + res = uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) + + if res == 0: + if parsed.field_set & (1 << uparser.UF_SCHEMA): + off = parsed.field_data[uparser.UF_SCHEMA].off + ln = parsed.field_data[uparser.UF_SCHEMA].len + schema = buf_data[off:off+ln] + + if parsed.field_set & (1 << uparser.UF_HOST): + off = parsed.field_data[uparser.UF_HOST].off + ln = parsed.field_data[uparser.UF_HOST].len + host = buf_data[off:off+ln] + + if parsed.field_set & (1 << uparser.UF_PORT): + port = parsed.port + + if parsed.field_set & (1 << uparser.UF_PATH): + off = parsed.field_data[uparser.UF_PATH].off + ln = parsed.field_data[uparser.UF_PATH].len + path = buf_data[off:off+ln] + + if parsed.field_set & (1 << uparser.UF_QUERY): + off = parsed.field_data[uparser.UF_QUERY].off + ln = parsed.field_data[uparser.UF_QUERY].len + query = buf_data[off:off+ln] + + if parsed.field_set & (1 << uparser.UF_FRAGMENT): + off = parsed.field_data[uparser.UF_FRAGMENT].off + ln = parsed.field_data[uparser.UF_FRAGMENT].len + fragment = buf_data[off:off+ln] + + if parsed.field_set & (1 << uparser.UF_USERINFO): + off = parsed.field_data[uparser.UF_USERINFO].off + ln = parsed.field_data[uparser.UF_USERINFO].len + userinfo = buf_data[off:off+ln] + + return URL(schema, host, port, path, query, fragment, userinfo) + else: + raise HttpParserInvalidURLError("invalid url {!r}".format(url)) + finally: + PyBuffer_Release(&py_buf) + PyMem_Free(parsed) diff --git a/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/AUTHORS b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/AUTHORS new file mode
100644 index 0000000..eeed788 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/AUTHORS @@ -0,0 +1,98 @@ +James Robert + github: jiaaro + twitter: @jiaaro + web: jiaaro.com + email: pydub@jiaaro.com + +Marc Webbie + github: marcwebbie + +Jean-philippe Serafin + github: jeanphix + +Anurag Ramdasan + github: AnuragRamdasan + +Choongmin Lee + github: clee704 + +Patrick Pittman + github: ptpittman + +Hunter Lang + github: hunterlang + +Alexey + github: nihisil + +Jaymz Campbell + github: jaymzcd + +Ross McFarland + github: ross + +John McMellen + github: jmcmellen + +Johan Lövgren + github: dashj + +Joachim Krüger + github: jkrgr + +Shichao An + github: shichao-an + +Michael Bortnyck + github: mbortnyck + +André Cloete + github: aj-cloete + +David Acacio + github: dacacioa + +Thiago Abdnur + github: bolaum + +Aurélien Ooms + github: aureooms + +Mike Mattozzi + github: mmattozzi + +Marcio Mazza + github: marciomazza + +Sungsu Lim + github: proflim + +Evandro Myller + github: emyller + +Sérgio Agostinho + github: SergioRAgostinho + +Antonio Larrosa + github: antlarr + +Aaron Craig + github: craigthelinguist + +Carlos del Castillo + github: greyalien502 + +Yudong Sun + github: sunjerry019 + +Jorge Perianez + github: JPery + +Chendi Luo + github: Creonalia + +Daniel Lefevre + gitHub: dplefevre + +Grzegorz Kotfis + github: gkotfis \ No newline at end of file diff --git a/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/INSTALLER b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/LICENSE b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/LICENSE new file mode 100644 index 0000000..0cb49b7 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2011 James Robert, http://jiaaro.com + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/METADATA b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/METADATA new file mode 100644 index 0000000..a34f12a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/METADATA @@ -0,0 +1,37 @@ +Metadata-Version: 2.1 +Name: pydub +Version: 0.25.1 +Summary: Manipulate audio with an simple and easy high level interface +Home-page: http://pydub.com +Author: James Robert +Author-email: jiaaro@gmail.com +License: MIT +Keywords: audio sound high-level +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Topic :: Multimedia :: Sound/Audio +Classifier: Topic :: Multimedia :: Sound/Audio :: Analysis +Classifier: Topic :: Multimedia :: Sound/Audio :: Conversion +Classifier: Topic :: Multimedia :: Sound/Audio :: Editors +Classifier: Topic :: Multimedia :: Sound/Audio :: Mixers +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities + + +Manipulate audio with an simple and easy high level interface. + +See the README file for details, usage info, and a list of gotchas. + + diff --git a/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/RECORD b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/RECORD new file mode 100644 index 0000000..968576a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/RECORD @@ -0,0 +1,30 @@ +pydub-0.25.1.dist-info/AUTHORS,sha256=AyY2PS9I2enOyBnUnxcpeAX-NnMNWLQT4yDtg8IIy78,1250 +pydub-0.25.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pydub-0.25.1.dist-info/LICENSE,sha256=roVlNiJMx6OJ6Wh3H8XyWYFL3Q2mNTnPcigq2672iXo,1074 +pydub-0.25.1.dist-info/METADATA,sha256=f0M8_ZVtbiYoUI9ejXIeJ03Jo9A5Nbi-0V1bVqs5iYk,1406 +pydub-0.25.1.dist-info/RECORD,, +pydub-0.25.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pydub-0.25.1.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110 +pydub-0.25.1.dist-info/top_level.txt,sha256=PHhiDCQVZdycZxfKL2lQozruBT6ZhvyZAwqjRrw3t0w,6 +pydub/__init__.py,sha256=w1Xv1awbaR3fMhTNE1-grnfswgARTNQrKpBzfZ--VBA,39 +pydub/__pycache__/__init__.cpython-312.pyc,, +pydub/__pycache__/audio_segment.cpython-312.pyc,, +pydub/__pycache__/effects.cpython-312.pyc,, +pydub/__pycache__/exceptions.cpython-312.pyc,, +pydub/__pycache__/generators.cpython-312.pyc,, +pydub/__pycache__/logging_utils.cpython-312.pyc,, +pydub/__pycache__/playback.cpython-312.pyc,, +pydub/__pycache__/pyaudioop.cpython-312.pyc,, +pydub/__pycache__/scipy_effects.cpython-312.pyc,, +pydub/__pycache__/silence.cpython-312.pyc,, +pydub/__pycache__/utils.cpython-312.pyc,, +pydub/audio_segment.py,sha256=Nf5VkHGY1v9Jqb7NtEYfwRpLrfqusfBdPGOZsi7R5Cg,49185 +pydub/effects.py,sha256=1HUMzhefrwG_E1rTnzvbl-P0-KNuwHklCnu8QCGS7jA,11507 +pydub/exceptions.py,sha256=osgXoUujwpH8K6hr80iYpW30CMBDFwqyaRD-5d7ZpKs,455 
+pydub/generators.py,sha256=u6q7J8JLOY-uEZqMPUTzakxyua3XNQcPiDsuiK2-lLA,4045 +pydub/logging_utils.py,sha256=WuSqfzn4zyT7PxXHGV-PXMDynufeM6sC6eSmVlGX2RU,374 +pydub/playback.py,sha256=zFngVclUL_7oDipjzKC8b7jToPNV11DV28rGyH8pio0,1987 +pydub/pyaudioop.py,sha256=Dp_cQgAyYjD4OV2ZHuxtKI2KABuPi9YYNRUF8giR80Q,13094 +pydub/scipy_effects.py,sha256=U2p8AQuVreTp5MrtUAzRbWgOHUc6Dwq0TAG_RtEg-7g,6637 +pydub/silence.py,sha256=F6MV0VlaO6mtuisjLGks_UR-GVmzO1v87_NKvzwRc30,6457 +pydub/utils.py,sha256=W71pgJFbbNP3adH63yn0Eo0CLLVgzXG7WHYSXpWvdyc,12368 diff --git a/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/REQUESTED b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/WHEEL b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/WHEEL new file mode 100644 index 0000000..01b8fc7 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/top_level.txt b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/top_level.txt new file mode 100644 index 0000000..0a0320e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/pydub-0.25.1.dist-info/top_level.txt @@ -0,0 +1 @@ +pydub diff --git a/hackaton/lib/python3.12/site-packages/pydub/__init__.py b/hackaton/lib/python3.12/site-packages/pydub/__init__.py new file mode 100644 index 0000000..65e30b4 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/pydub/__init__.py @@ -0,0 +1 @@ +from .audio_segment import AudioSegment \ No newline at end of file diff --git a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..76bdef1 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/audio_segment.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/audio_segment.cpython-312.pyc new file mode 100644 index 0000000..a6df1c5 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/audio_segment.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/effects.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/effects.cpython-312.pyc new file mode 100644 index 0000000..0991727 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/effects.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/exceptions.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000..138bcc7 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/exceptions.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/generators.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/generators.cpython-312.pyc new file mode 100644 index 0000000..77d63f5 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/generators.cpython-312.pyc differ diff --git 
a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/logging_utils.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/logging_utils.cpython-312.pyc new file mode 100644 index 0000000..25849dd Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/logging_utils.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/playback.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/playback.cpython-312.pyc new file mode 100644 index 0000000..a7faa34 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/playback.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/pyaudioop.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/pyaudioop.cpython-312.pyc new file mode 100644 index 0000000..6386146 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/pyaudioop.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/scipy_effects.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/scipy_effects.cpython-312.pyc new file mode 100644 index 0000000..f424815 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/scipy_effects.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/silence.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/silence.cpython-312.pyc new file mode 100644 index 0000000..35754cd Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/silence.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/pydub/__pycache__/utils.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000..cdf9701 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/pydub/__pycache__/utils.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/pydub/audio_segment.py b/hackaton/lib/python3.12/site-packages/pydub/audio_segment.py new file mode 100644 index 0000000..14ea46e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/pydub/audio_segment.py @@ -0,0 +1,1399 @@ +from __future__ import division + +import array +import os +import subprocess +from tempfile import TemporaryFile, NamedTemporaryFile +import wave +import sys +import struct +from .logging_utils import log_conversion, log_subprocess_output +from .utils import mediainfo_json, fsdecode +import base64 +from collections import namedtuple + +try: + from StringIO import StringIO +except: + from io import StringIO + +from io import BytesIO + +try: + from itertools import izip +except: + izip = zip + +from .utils import ( + _fd_or_path_or_tempfile, + db_to_float, + ratio_to_db, + get_encoder_name, + get_array_type, + audioop, +) +from .exceptions import ( + TooManyMissingFrames, + InvalidDuration, + InvalidID3TagVersion, + InvalidTag, + CouldntDecodeError, + CouldntEncodeError, + MissingAudioParameter, +) + +if sys.version_info >= (3, 0): + basestring = str + xrange = range + StringIO = BytesIO + + +class ClassPropertyDescriptor(object): + + def __init__(self, fget, fset=None): + self.fget = fget + self.fset = fset + + def __get__(self, obj, klass=None): + if klass is None: + klass = type(obj) + return self.fget.__get__(obj, klass)() + + def __set__(self, obj, value): + if not self.fset: + raise AttributeError("can't set 
attribute") + type_ = type(obj) + return self.fset.__get__(obj, type_)(value) + + def setter(self, func): + if not isinstance(func, (classmethod, staticmethod)): + func = classmethod(func) + self.fset = func + return self + + +def classproperty(func): + if not isinstance(func, (classmethod, staticmethod)): + func = classmethod(func) + + return ClassPropertyDescriptor(func) + + +AUDIO_FILE_EXT_ALIASES = { + "m4a": "mp4", + "wave": "wav", +} + +WavSubChunk = namedtuple('WavSubChunk', ['id', 'position', 'size']) +WavData = namedtuple('WavData', ['audio_format', 'channels', 'sample_rate', + 'bits_per_sample', 'raw_data']) + + +def extract_wav_headers(data): + # def search_subchunk(data, subchunk_id): + pos = 12 # The size of the RIFF chunk descriptor + subchunks = [] + while pos + 8 <= len(data) and len(subchunks) < 10: + subchunk_id = data[pos:pos + 4] + subchunk_size = struct.unpack_from(' 2**32: + raise CouldntDecodeError("Unable to process >4GB files") + + # Set the file size in the RIFF chunk descriptor + data[4:8] = struct.pack(' b'\x7f'[0]]) + old_bytes = struct.pack(pack_fmt, b0, b1, b2) + byte_buffer.write(old_bytes) + + self._data = byte_buffer.getvalue() + self.sample_width = 4 + self.frame_width = self.channels * self.sample_width + + super(AudioSegment, self).__init__(*args, **kwargs) + + @property + def raw_data(self): + """ + public access to the raw audio data as a bytestring + """ + return self._data + + def get_array_of_samples(self, array_type_override=None): + """ + returns the raw_data as an array of samples + """ + if array_type_override is None: + array_type_override = self.array_type + return array.array(array_type_override, self._data) + + @property + def array_type(self): + return get_array_type(self.sample_width * 8) + + def __len__(self): + """ + returns the length of this audio segment in milliseconds + """ + return round(1000 * (self.frame_count() / self.frame_rate)) + + def __eq__(self, other): + try: + return self._data == other._data + except: + return False + + def __hash__(self): + return hash(AudioSegment) ^ hash((self.channels, self.frame_rate, self.sample_width, self._data)) + + def __ne__(self, other): + return not (self == other) + + def __iter__(self): + return (self[i] for i in xrange(len(self))) + + def __getitem__(self, millisecond): + if isinstance(millisecond, slice): + if millisecond.step: + return ( + self[i:i + millisecond.step] + for i in xrange(*millisecond.indices(len(self))) + ) + + start = millisecond.start if millisecond.start is not None else 0 + end = millisecond.stop if millisecond.stop is not None \ + else len(self) + + start = min(start, len(self)) + end = min(end, len(self)) + else: + start = millisecond + end = millisecond + 1 + + start = self._parse_position(start) * self.frame_width + end = self._parse_position(end) * self.frame_width + data = self._data[start:end] + + # ensure the output is as long as the requester is expecting + expected_length = end - start + missing_frames = (expected_length - len(data)) // self.frame_width + if missing_frames: + if missing_frames > self.frame_count(ms=2): + raise TooManyMissingFrames( + "You should never be filling in " + " more than 2 ms with silence here, " + "missing frames: %s" % missing_frames) + silence = audioop.mul(data[:self.frame_width], + self.sample_width, 0) + data += (silence * missing_frames) + + return self._spawn(data) + + def get_sample_slice(self, start_sample=None, end_sample=None): + """ + Get a section of the audio segment by sample index. 
+ + NOTE: Negative indices do *not* address samples backword + from the end of the audio segment like a python list. + This is intentional. + """ + max_val = int(self.frame_count()) + + def bounded(val, default): + if val is None: + return default + if val < 0: + return 0 + if val > max_val: + return max_val + return val + + start_i = bounded(start_sample, 0) * self.frame_width + end_i = bounded(end_sample, max_val) * self.frame_width + + data = self._data[start_i:end_i] + return self._spawn(data) + + def __add__(self, arg): + if isinstance(arg, AudioSegment): + return self.append(arg, crossfade=0) + else: + return self.apply_gain(arg) + + def __radd__(self, rarg): + """ + Permit use of sum() builtin with an iterable of AudioSegments + """ + if rarg == 0: + return self + raise TypeError("Gains must be the second addend after the " + "AudioSegment") + + def __sub__(self, arg): + if isinstance(arg, AudioSegment): + raise TypeError("AudioSegment objects can't be subtracted from " + "each other") + else: + return self.apply_gain(-arg) + + def __mul__(self, arg): + """ + If the argument is an AudioSegment, overlay the multiplied audio + segment. + + If it's a number, just use the string multiply operation to repeat the + audio. + + The following would return an AudioSegment that contains the + audio of audio_seg eight times + + `audio_seg * 8` + """ + if isinstance(arg, AudioSegment): + return self.overlay(arg, position=0, loop=True) + else: + return self._spawn(data=self._data * arg) + + def _spawn(self, data, overrides={}): + """ + Creates a new audio segment using the metadata from the current one + and the data passed in. Should be used whenever an AudioSegment is + being returned by an operation that would alters the current one, + since AudioSegment objects are immutable. + """ + # accept lists of data chunks + if isinstance(data, list): + data = b''.join(data) + + if isinstance(data, array.array): + try: + data = data.tobytes() + except: + data = data.tostring() + + # accept file-like objects + if hasattr(data, 'read'): + if hasattr(data, 'seek'): + data.seek(0) + data = data.read() + + metadata = { + 'sample_width': self.sample_width, + 'frame_rate': self.frame_rate, + 'frame_width': self.frame_width, + 'channels': self.channels + } + metadata.update(overrides) + return self.__class__(data=data, metadata=metadata) + + @classmethod + def _sync(cls, *segs): + channels = max(seg.channels for seg in segs) + frame_rate = max(seg.frame_rate for seg in segs) + sample_width = max(seg.sample_width for seg in segs) + + return tuple( + seg.set_channels(channels).set_frame_rate(frame_rate).set_sample_width(sample_width) + for seg in segs + ) + + def _parse_position(self, val): + if val < 0: + val = len(self) - abs(val) + val = self.frame_count(ms=len(self)) if val == float("inf") else \ + self.frame_count(ms=val) + return int(val) + + @classmethod + def empty(cls): + return cls(b'', metadata={ + "channels": 1, + "sample_width": 1, + "frame_rate": 1, + "frame_width": 1 + }) + + @classmethod + def silent(cls, duration=1000, frame_rate=11025): + """ + Generate a silent audio segment. + duration specified in milliseconds (default duration: 1000ms, default frame_rate: 11025). 
+ """ + frames = int(frame_rate * (duration / 1000.0)) + data = b"\0\0" * frames + return cls(data, metadata={"channels": 1, + "sample_width": 2, + "frame_rate": frame_rate, + "frame_width": 2}) + + @classmethod + def from_mono_audiosegments(cls, *mono_segments): + if not len(mono_segments): + raise ValueError("At least one AudioSegment instance is required") + + segs = cls._sync(*mono_segments) + + if segs[0].channels != 1: + raise ValueError( + "AudioSegment.from_mono_audiosegments requires all arguments are mono AudioSegment instances") + + channels = len(segs) + sample_width = segs[0].sample_width + frame_rate = segs[0].frame_rate + + frame_count = max(int(seg.frame_count()) for seg in segs) + data = array.array( + segs[0].array_type, + b'\0' * (frame_count * sample_width * channels) + ) + + for i, seg in enumerate(segs): + data[i::channels] = seg.get_array_of_samples() + + return cls( + data, + channels=channels, + sample_width=sample_width, + frame_rate=frame_rate, + ) + + @classmethod + def from_file_using_temporary_files(cls, file, format=None, codec=None, parameters=None, start_second=None, duration=None, **kwargs): + orig_file = file + file, close_file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False) + + if format: + format = format.lower() + format = AUDIO_FILE_EXT_ALIASES.get(format, format) + + def is_format(f): + f = f.lower() + if format == f: + return True + if isinstance(orig_file, basestring): + return orig_file.lower().endswith(".{0}".format(f)) + if isinstance(orig_file, bytes): + return orig_file.lower().endswith((".{0}".format(f)).encode('utf8')) + return False + + if is_format("wav"): + try: + obj = cls._from_safe_wav(file) + if close_file: + file.close() + if start_second is None and duration is None: + return obj + elif start_second is not None and duration is None: + return obj[start_second*1000:] + elif start_second is None and duration is not None: + return obj[:duration*1000] + else: + return obj[start_second*1000:(start_second+duration)*1000] + except: + file.seek(0) + elif is_format("raw") or is_format("pcm"): + sample_width = kwargs['sample_width'] + frame_rate = kwargs['frame_rate'] + channels = kwargs['channels'] + metadata = { + 'sample_width': sample_width, + 'frame_rate': frame_rate, + 'channels': channels, + 'frame_width': channels * sample_width + } + obj = cls(data=file.read(), metadata=metadata) + if close_file: + file.close() + if start_second is None and duration is None: + return obj + elif start_second is not None and duration is None: + return obj[start_second * 1000:] + elif start_second is None and duration is not None: + return obj[:duration * 1000] + else: + return obj[start_second * 1000:(start_second + duration) * 1000] + + input_file = NamedTemporaryFile(mode='wb', delete=False) + try: + input_file.write(file.read()) + except(OSError): + input_file.flush() + input_file.close() + input_file = NamedTemporaryFile(mode='wb', delete=False, buffering=2 ** 31 - 1) + if close_file: + file.close() + close_file = True + file = open(orig_file, buffering=2 ** 13 - 1, mode='rb') + reader = file.read(2 ** 31 - 1) + while reader: + input_file.write(reader) + reader = file.read(2 ** 31 - 1) + input_file.flush() + if close_file: + file.close() + + output = NamedTemporaryFile(mode="rb", delete=False) + + conversion_command = [cls.converter, + '-y', # always overwrite existing files + ] + + # If format is not defined + # ffmpeg/avconv will detect it automatically + if format: + conversion_command += ["-f", format] + + if codec: + # force audio decoder 
+ conversion_command += ["-acodec", codec] + + conversion_command += [ + "-i", input_file.name, # input_file options (filename last) + "-vn", # Drop any video streams if there are any + "-f", "wav" # output options (filename last) + ] + + if start_second is not None: + conversion_command += ["-ss", str(start_second)] + + if duration is not None: + conversion_command += ["-t", str(duration)] + + conversion_command += [output.name] + + if parameters is not None: + # extend arguments with arbitrary set + conversion_command.extend(parameters) + + log_conversion(conversion_command) + + with open(os.devnull, 'rb') as devnull: + p = subprocess.Popen(conversion_command, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p_out, p_err = p.communicate() + + log_subprocess_output(p_out) + log_subprocess_output(p_err) + + try: + if p.returncode != 0: + raise CouldntDecodeError( + "Decoding failed. ffmpeg returned error code: {0}\n\nOutput from ffmpeg/avlib:\n\n{1}".format( + p.returncode, p_err.decode(errors='ignore') )) + obj = cls._from_safe_wav(output) + finally: + input_file.close() + output.close() + os.unlink(input_file.name) + os.unlink(output.name) + + if start_second is None and duration is None: + return obj + elif start_second is not None and duration is None: + return obj[0:] + elif start_second is None and duration is not None: + return obj[:duration * 1000] + else: + return obj[0:duration * 1000] + + + @classmethod + def from_file(cls, file, format=None, codec=None, parameters=None, start_second=None, duration=None, **kwargs): + orig_file = file + try: + filename = fsdecode(file) + except TypeError: + filename = None + file, close_file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False) + + if format: + format = format.lower() + format = AUDIO_FILE_EXT_ALIASES.get(format, format) + + def is_format(f): + f = f.lower() + if format == f: + return True + + if filename: + return filename.lower().endswith(".{0}".format(f)) + + return False + + if is_format("wav"): + try: + if start_second is None and duration is None: + return cls._from_safe_wav(file) + elif start_second is not None and duration is None: + return cls._from_safe_wav(file)[start_second*1000:] + elif start_second is None and duration is not None: + return cls._from_safe_wav(file)[:duration*1000] + else: + return cls._from_safe_wav(file)[start_second*1000:(start_second+duration)*1000] + except: + file.seek(0) + elif is_format("raw") or is_format("pcm"): + sample_width = kwargs['sample_width'] + frame_rate = kwargs['frame_rate'] + channels = kwargs['channels'] + metadata = { + 'sample_width': sample_width, + 'frame_rate': frame_rate, + 'channels': channels, + 'frame_width': channels * sample_width + } + if start_second is None and duration is None: + return cls(data=file.read(), metadata=metadata) + elif start_second is not None and duration is None: + return cls(data=file.read(), metadata=metadata)[start_second*1000:] + elif start_second is None and duration is not None: + return cls(data=file.read(), metadata=metadata)[:duration*1000] + else: + return cls(data=file.read(), metadata=metadata)[start_second*1000:(start_second+duration)*1000] + + conversion_command = [cls.converter, + '-y', # always overwrite existing files + ] + + # If format is not defined + # ffmpeg/avconv will detect it automatically + if format: + conversion_command += ["-f", format] + + if codec: + # force audio decoder + conversion_command += ["-acodec", codec] + + read_ahead_limit = kwargs.get('read_ahead_limit', -1) + if filename: + 
conversion_command += ["-i", filename] + stdin_parameter = None + stdin_data = None + else: + if cls.converter == 'ffmpeg': + conversion_command += ["-read_ahead_limit", str(read_ahead_limit), + "-i", "cache:pipe:0"] + else: + conversion_command += ["-i", "-"] + stdin_parameter = subprocess.PIPE + stdin_data = file.read() + + if codec: + info = None + else: + info = mediainfo_json(orig_file, read_ahead_limit=read_ahead_limit) + if info: + audio_streams = [x for x in info['streams'] + if x['codec_type'] == 'audio'] + # This is a workaround for some ffprobe versions that always say + # that mp3/mp4/aac/webm/ogg files contain fltp samples + audio_codec = audio_streams[0].get('codec_name') + if (audio_streams[0].get('sample_fmt') == 'fltp' and + audio_codec in ['mp3', 'mp4', 'aac', 'webm', 'ogg']): + bits_per_sample = 16 + else: + bits_per_sample = audio_streams[0]['bits_per_sample'] + if bits_per_sample == 8: + acodec = 'pcm_u8' + else: + acodec = 'pcm_s%dle' % bits_per_sample + + conversion_command += ["-acodec", acodec] + + conversion_command += [ + "-vn", # Drop any video streams if there are any + "-f", "wav" # output options (filename last) + ] + + if start_second is not None: + conversion_command += ["-ss", str(start_second)] + + if duration is not None: + conversion_command += ["-t", str(duration)] + + conversion_command += ["-"] + + if parameters is not None: + # extend arguments with arbitrary set + conversion_command.extend(parameters) + + log_conversion(conversion_command) + + p = subprocess.Popen(conversion_command, stdin=stdin_parameter, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p_out, p_err = p.communicate(input=stdin_data) + + if p.returncode != 0 or len(p_out) == 0: + if close_file: + file.close() + raise CouldntDecodeError( + "Decoding failed. ffmpeg returned error code: {0}\n\nOutput from ffmpeg/avlib:\n\n{1}".format( + p.returncode, p_err.decode(errors='ignore') )) + + p_out = bytearray(p_out) + fix_wav_headers(p_out) + p_out = bytes(p_out) + obj = cls(p_out) + + if close_file: + file.close() + + if start_second is None and duration is None: + return obj + elif start_second is not None and duration is None: + return obj[0:] + elif start_second is None and duration is not None: + return obj[:duration * 1000] + else: + return obj[0:duration * 1000] + + @classmethod + def from_mp3(cls, file, parameters=None): + return cls.from_file(file, 'mp3', parameters=parameters) + + @classmethod + def from_flv(cls, file, parameters=None): + return cls.from_file(file, 'flv', parameters=parameters) + + @classmethod + def from_ogg(cls, file, parameters=None): + return cls.from_file(file, 'ogg', parameters=parameters) + + @classmethod + def from_wav(cls, file, parameters=None): + return cls.from_file(file, 'wav', parameters=parameters) + + @classmethod + def from_raw(cls, file, **kwargs): + return cls.from_file(file, 'raw', sample_width=kwargs['sample_width'], frame_rate=kwargs['frame_rate'], + channels=kwargs['channels']) + + @classmethod + def _from_safe_wav(cls, file): + file, close_file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False) + file.seek(0) + obj = cls(data=file) + if close_file: + file.close() + return obj + + def export(self, out_f=None, format='mp3', codec=None, bitrate=None, parameters=None, tags=None, id3v2_version='4', + cover=None): + """ + Export an AudioSegment to a file with given options + + out_f (string): + Path to destination audio file. Also accepts os.PathLike objects on + python >= 3.6 + + format (string) + Format for destination audio file. 
+ ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files) + + codec (string) + Codec used to encode the destination file. + + bitrate (string) + Bitrate used when encoding destination file. (64, 92, 128, 256, 312k...) + Each codec accepts different bitrate arguments so take a look at the + ffmpeg documentation for details (bitrate usually shown as -b, -ba or + -a:b). + + parameters (list of strings) + Aditional ffmpeg/avconv parameters + + tags (dict) + Set metadata information to destination files + usually used as tags. ({title='Song Title', artist='Song Artist'}) + + id3v2_version (string) + Set ID3v2 version for tags. (default: '4') + + cover (file) + Set cover for audio file from image file. (png or jpg) + """ + id3v2_allowed_versions = ['3', '4'] + + if format == "raw" and (codec is not None or parameters is not None): + raise AttributeError( + 'Can not invoke ffmpeg when export format is "raw"; ' + 'specify an ffmpeg raw format like format="s16le" instead ' + 'or call export(format="raw") with no codec or parameters') + + out_f, _ = _fd_or_path_or_tempfile(out_f, 'wb+') + out_f.seek(0) + + if format == "raw": + out_f.write(self._data) + out_f.seek(0) + return out_f + + # wav with no ffmpeg parameters can just be written directly to out_f + easy_wav = format == "wav" and codec is None and parameters is None + + if easy_wav: + data = out_f + else: + data = NamedTemporaryFile(mode="wb", delete=False) + + pcm_for_wav = self._data + if self.sample_width == 1: + # convert to unsigned integers for wav + pcm_for_wav = audioop.bias(self._data, 1, 128) + + wave_data = wave.open(data, 'wb') + wave_data.setnchannels(self.channels) + wave_data.setsampwidth(self.sample_width) + wave_data.setframerate(self.frame_rate) + # For some reason packing the wave header struct with + # a float in python 2 doesn't throw an exception + wave_data.setnframes(int(self.frame_count())) + wave_data.writeframesraw(pcm_for_wav) + wave_data.close() + + # for easy wav files, we're done (wav data is written directly to out_f) + if easy_wav: + out_f.seek(0) + return out_f + + output = NamedTemporaryFile(mode="w+b", delete=False) + + # build converter command to export + conversion_command = [ + self.converter, + '-y', # always overwrite existing files + "-f", "wav", "-i", data.name, # input options (filename last) + ] + + if codec is None: + codec = self.DEFAULT_CODECS.get(format, None) + + if cover is not None: + if cover.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff')) and format == "mp3": + conversion_command.extend(["-i", cover, "-map", "0", "-map", "1", "-c:v", "mjpeg"]) + else: + raise AttributeError( + "Currently cover images are only supported by MP3 files. 
The allowed image formats are: .tif, .jpg, .bmp, .jpeg and .png.") + + if codec is not None: + # force audio encoder + conversion_command.extend(["-acodec", codec]) + + if bitrate is not None: + conversion_command.extend(["-b:a", bitrate]) + + if parameters is not None: + # extend arguments with arbitrary set + conversion_command.extend(parameters) + + if tags is not None: + if not isinstance(tags, dict): + raise InvalidTag("Tags must be a dictionary.") + else: + # Extend converter command with tags + # print(tags) + for key, value in tags.items(): + conversion_command.extend( + ['-metadata', '{0}={1}'.format(key, value)]) + + if format == 'mp3': + # set id3v2 tag version + if id3v2_version not in id3v2_allowed_versions: + raise InvalidID3TagVersion( + "id3v2_version not allowed, allowed versions: %s" % id3v2_allowed_versions) + conversion_command.extend([ + "-id3v2_version", id3v2_version + ]) + + if sys.platform == 'darwin' and codec == 'mp3': + conversion_command.extend(["-write_xing", "0"]) + + conversion_command.extend([ + "-f", format, output.name, # output options (filename last) + ]) + + log_conversion(conversion_command) + + # read stdin / write stdout + with open(os.devnull, 'rb') as devnull: + p = subprocess.Popen(conversion_command, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p_out, p_err = p.communicate() + + log_subprocess_output(p_out) + log_subprocess_output(p_err) + + if p.returncode != 0: + raise CouldntEncodeError( + "Encoding failed. ffmpeg/avlib returned error code: {0}\n\nCommand:{1}\n\nOutput from ffmpeg/avlib:\n\n{2}".format( + p.returncode, conversion_command, p_err.decode(errors='ignore') )) + + output.seek(0) + out_f.write(output.read()) + + data.close() + output.close() + + os.unlink(data.name) + os.unlink(output.name) + + out_f.seek(0) + return out_f + + def get_frame(self, index): + frame_start = index * self.frame_width + frame_end = frame_start + self.frame_width + return self._data[frame_start:frame_end] + + def frame_count(self, ms=None): + """ + returns the number of frames for the given number of milliseconds, or + if not specified, the number of frames in the whole AudioSegment + """ + if ms is not None: + return ms * (self.frame_rate / 1000.0) + else: + return float(len(self._data) // self.frame_width) + + def set_sample_width(self, sample_width): + if sample_width == self.sample_width: + return self + + frame_width = self.channels * sample_width + + return self._spawn( + audioop.lin2lin(self._data, self.sample_width, sample_width), + overrides={'sample_width': sample_width, 'frame_width': frame_width} + ) + + def set_frame_rate(self, frame_rate): + if frame_rate == self.frame_rate: + return self + + if self._data: + converted, _ = audioop.ratecv(self._data, self.sample_width, + self.channels, self.frame_rate, + frame_rate, None) + else: + converted = self._data + + return self._spawn(data=converted, + overrides={'frame_rate': frame_rate}) + + def set_channels(self, channels): + if channels == self.channels: + return self + + if channels == 2 and self.channels == 1: + fn = audioop.tostereo + frame_width = self.frame_width * 2 + fac = 1 + converted = fn(self._data, self.sample_width, fac, fac) + elif channels == 1 and self.channels == 2: + fn = audioop.tomono + frame_width = self.frame_width // 2 + fac = 0.5 + converted = fn(self._data, self.sample_width, fac, fac) + elif channels == 1: + channels_data = [seg.get_array_of_samples() for seg in self.split_to_mono()] + frame_count = int(self.frame_count()) + converted = array.array( + 
channels_data[0].typecode, + b'\0' * (frame_count * self.sample_width) + ) + for raw_channel_data in channels_data: + for i in range(frame_count): + converted[i] += raw_channel_data[i] // self.channels + frame_width = self.frame_width // self.channels + elif self.channels == 1: + dup_channels = [self for iChannel in range(channels)] + return AudioSegment.from_mono_audiosegments(*dup_channels) + else: + raise ValueError( + "AudioSegment.set_channels only supports mono-to-multi channel and multi-to-mono channel conversion") + + return self._spawn(data=converted, + overrides={ + 'channels': channels, + 'frame_width': frame_width}) + + def split_to_mono(self): + if self.channels == 1: + return [self] + + samples = self.get_array_of_samples() + + mono_channels = [] + for i in range(self.channels): + samples_for_current_channel = samples[i::self.channels] + + try: + mono_data = samples_for_current_channel.tobytes() + except AttributeError: + mono_data = samples_for_current_channel.tostring() + + mono_channels.append( + self._spawn(mono_data, overrides={"channels": 1, "frame_width": self.sample_width}) + ) + + return mono_channels + + @property + def rms(self): + return audioop.rms(self._data, self.sample_width) + + @property + def dBFS(self): + rms = self.rms + if not rms: + return -float("infinity") + return ratio_to_db(self.rms / self.max_possible_amplitude) + + @property + def max(self): + return audioop.max(self._data, self.sample_width) + + @property + def max_possible_amplitude(self): + bits = self.sample_width * 8 + max_possible_val = (2 ** bits) + + # since half is above 0 and half is below the max amplitude is divided + return max_possible_val / 2 + + @property + def max_dBFS(self): + return ratio_to_db(self.max, self.max_possible_amplitude) + + @property + def duration_seconds(self): + return self.frame_rate and self.frame_count() / self.frame_rate or 0.0 + + def get_dc_offset(self, channel=1): + """ + Returns a value between -1.0 and 1.0 representing the DC offset of a + channel (1 for left, 2 for right). + """ + if not 1 <= channel <= 2: + raise ValueError("channel value must be 1 (left) or 2 (right)") + + if self.channels == 1: + data = self._data + elif channel == 1: + data = audioop.tomono(self._data, self.sample_width, 1, 0) + else: + data = audioop.tomono(self._data, self.sample_width, 0, 1) + + return float(audioop.avg(data, self.sample_width)) / self.max_possible_amplitude + + def remove_dc_offset(self, channel=None, offset=None): + """ + Removes DC offset of given channel. Calculates offset if it's not given. + Offset values must be in range -1.0 to 1.0. If channel is None, removes + DC offset from all available channels. 
+ """ + if channel and not 1 <= channel <= 2: + raise ValueError("channel value must be None, 1 (left) or 2 (right)") + + if offset and not -1.0 <= offset <= 1.0: + raise ValueError("offset value must be in range -1.0 to 1.0") + + if offset: + offset = int(round(offset * self.max_possible_amplitude)) + + def remove_data_dc(data, off): + if not off: + off = audioop.avg(data, self.sample_width) + return audioop.bias(data, self.sample_width, -off) + + if self.channels == 1: + return self._spawn(data=remove_data_dc(self._data, offset)) + + left_channel = audioop.tomono(self._data, self.sample_width, 1, 0) + right_channel = audioop.tomono(self._data, self.sample_width, 0, 1) + + if not channel or channel == 1: + left_channel = remove_data_dc(left_channel, offset) + + if not channel or channel == 2: + right_channel = remove_data_dc(right_channel, offset) + + left_channel = audioop.tostereo(left_channel, self.sample_width, 1, 0) + right_channel = audioop.tostereo(right_channel, self.sample_width, 0, 1) + + return self._spawn(data=audioop.add(left_channel, right_channel, + self.sample_width)) + + def apply_gain(self, volume_change): + return self._spawn(data=audioop.mul(self._data, self.sample_width, + db_to_float(float(volume_change)))) + + def overlay(self, seg, position=0, loop=False, times=None, gain_during_overlay=None): + """ + Overlay the provided segment on to this segment starting at the + specificed position and using the specfied looping beahvior. + + seg (AudioSegment): + The audio segment to overlay on to this one. + + position (optional int): + The position to start overlaying the provided segment in to this + one. + + loop (optional bool): + Loop seg as many times as necessary to match this segment's length. + Overrides loops param. + + times (optional int): + Loop seg the specified number of times or until it matches this + segment's length. 1 means once, 2 means twice, ... 0 would make the + call a no-op + gain_during_overlay (optional int): + Changes this segment's volume by the specified amount during the + duration of time that seg is overlaid on top of it. When negative, + this has the effect of 'ducking' the audio under the overlay. + """ + + if loop: + # match loop=True's behavior with new times (count) mechinism. 
+ times = -1 + elif times is None: + # no times specified, just once through + times = 1 + elif times == 0: + # it's a no-op, make a copy since we never mutate + return self._spawn(self._data) + + output = StringIO() + + seg1, seg2 = AudioSegment._sync(self, seg) + sample_width = seg1.sample_width + spawn = seg1._spawn + + output.write(seg1[:position]._data) + + # drop down to the raw data + seg1 = seg1[position:]._data + seg2 = seg2._data + pos = 0 + seg1_len = len(seg1) + seg2_len = len(seg2) + while times: + remaining = max(0, seg1_len - pos) + if seg2_len >= remaining: + seg2 = seg2[:remaining] + seg2_len = remaining + # we've hit the end, we're done looping (if we were) and this + # is our last go-around + times = 1 + + if gain_during_overlay: + seg1_overlaid = seg1[pos:pos + seg2_len] + seg1_adjusted_gain = audioop.mul(seg1_overlaid, self.sample_width, + db_to_float(float(gain_during_overlay))) + output.write(audioop.add(seg1_adjusted_gain, seg2, sample_width)) + else: + output.write(audioop.add(seg1[pos:pos + seg2_len], seg2, + sample_width)) + pos += seg2_len + + # dec times to break our while loop (eventually) + times -= 1 + + output.write(seg1[pos:]) + + return spawn(data=output) + + def append(self, seg, crossfade=100): + seg1, seg2 = AudioSegment._sync(self, seg) + + if not crossfade: + return seg1._spawn(seg1._data + seg2._data) + elif crossfade > len(self): + raise ValueError("Crossfade is longer than the original AudioSegment ({}ms > {}ms)".format( + crossfade, len(self) + )) + elif crossfade > len(seg): + raise ValueError("Crossfade is longer than the appended AudioSegment ({}ms > {}ms)".format( + crossfade, len(seg) + )) + + xf = seg1[-crossfade:].fade(to_gain=-120, start=0, end=float('inf')) + xf *= seg2[:crossfade].fade(from_gain=-120, start=0, end=float('inf')) + + output = TemporaryFile() + + output.write(seg1[:-crossfade]._data) + output.write(xf._data) + output.write(seg2[crossfade:]._data) + + output.seek(0) + obj = seg1._spawn(data=output) + output.close() + return obj + + def fade(self, to_gain=0, from_gain=0, start=None, end=None, + duration=None): + """ + Fade the volume of this audio segment. 
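+
+        Example (a sketch; assumes a loaded AudioSegment named seg):
+        >>> quieter_middle = seg.fade(to_gain=-18, start=2000, duration=3000)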
+
+        to_gain (float):
+            resulting volume_change in db
+
+        from_gain (float):
+            starting volume_change in db
+
+        start (int):
+            default = beginning of the segment
+            when in this segment to start fading in milliseconds
+
+        end (int):
+            default = end of the segment
+            when in this segment to stop fading in milliseconds
+
+        duration (int):
+            default = until the end of the audio segment
+            the duration of the fade
+        """
+        if None not in [duration, end, start]:
+            raise TypeError('Only two of the three arguments, "start", '
+                            '"end", and "duration" may be specified')
+
+        # no fade == the same audio
+        if to_gain == 0 and from_gain == 0:
+            return self
+
+        start = min(len(self), start) if start is not None else None
+        end = min(len(self), end) if end is not None else None
+
+        if start is not None and start < 0:
+            start += len(self)
+        if end is not None and end < 0:
+            end += len(self)
+
+        if duration is not None and duration < 0:
+            raise InvalidDuration("duration must be a positive integer")
+
+        if duration:
+            if start is not None:
+                end = start + duration
+            elif end is not None:
+                start = end - duration
+        else:
+            duration = end - start
+
+        from_power = db_to_float(from_gain)
+
+        output = []
+
+        # original data - up until the crossfade portion, as is
+        before_fade = self[:start]._data
+        if from_gain != 0:
+            before_fade = audioop.mul(before_fade,
+                                      self.sample_width,
+                                      from_power)
+        output.append(before_fade)
+
+        gain_delta = db_to_float(to_gain) - from_power
+
+        # fades longer than 100ms can use coarse fading (one gain step per ms),
+        # shorter fades will have audible clicks so they use precise fading
+        # (one gain step per sample)
+        if duration > 100:
+            scale_step = gain_delta / duration
+
+            for i in range(duration):
+                volume_change = from_power + (scale_step * i)
+                chunk = self[start + i]
+                chunk = audioop.mul(chunk._data,
+                                    self.sample_width,
+                                    volume_change)
+
+                output.append(chunk)
+        else:
+            start_frame = self.frame_count(ms=start)
+            end_frame = self.frame_count(ms=end)
+            fade_frames = end_frame - start_frame
+            scale_step = gain_delta / fade_frames
+
+            for i in range(int(fade_frames)):
+                volume_change = from_power + (scale_step * i)
+                sample = self.get_frame(int(start_frame + i))
+                sample = audioop.mul(sample, self.sample_width, volume_change)
+
+                output.append(sample)
+
+        # original data after the crossfade portion, at the new volume
+        after_fade = self[end:]._data
+        if to_gain != 0:
+            after_fade = audioop.mul(after_fade,
+                                     self.sample_width,
+                                     db_to_float(to_gain))
+        output.append(after_fade)
+
+        return self._spawn(data=output)
+
+    def fade_out(self, duration):
+        return self.fade(to_gain=-120, duration=duration, end=float('inf'))
+
+    def fade_in(self, duration):
+        return self.fade(from_gain=-120, duration=duration, start=0)
+
+    def reverse(self):
+        return self._spawn(
+            data=audioop.reverse(self._data, self.sample_width)
+        )
+
+    def _repr_html_(self):
+        src = """
+                  <audio controls>
+                      <source src="data:audio/mpeg;base64,{base64}" type="audio/mpeg"/>
+                      Your browser does not support the audio element.
+                  </audio>
+              """
+        fh = self.export()
+        data = base64.b64encode(fh.read()).decode('ascii')
+        return src.format(base64=data)
+
+
+from . import effects
diff --git a/hackaton/lib/python3.12/site-packages/pydub/effects.py b/hackaton/lib/python3.12/site-packages/pydub/effects.py
new file mode 100644
index 0000000..0210521
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/pydub/effects.py
@@ -0,0 +1,341 @@
+import sys
+import math
+import array
+from .utils import (
+    db_to_float,
+    ratio_to_db,
+    register_pydub_effect,
+    make_chunks,
+    audioop,
+    get_min_max_value
+)
+from .silence import split_on_silence
+from .exceptions import TooManyMissingFrames, InvalidDuration
+
+if sys.version_info >= (3, 0):
+    xrange = range
+
+
+@register_pydub_effect
+def apply_mono_filter_to_each_channel(seg, filter_fn):
+    n_channels = seg.channels
+
+    channel_segs = seg.split_to_mono()
+    channel_segs = [filter_fn(channel_seg) for channel_seg in channel_segs]
+
+    out_data = seg.get_array_of_samples()
+    for channel_i, channel_seg in enumerate(channel_segs):
+        for sample_i, sample in enumerate(channel_seg.get_array_of_samples()):
+            index = (sample_i * n_channels) + channel_i
+            out_data[index] = sample
+
+    return seg._spawn(out_data)
+
+
+@register_pydub_effect
+def normalize(seg, headroom=0.1):
+    """
+    headroom is how close to the maximum volume to boost the signal up to (specified in dB)
+    """
+    peak_sample_val = seg.max
+
+    # if the max is 0, this audio segment is silent, and can't be normalized
+    if peak_sample_val == 0:
+        return seg
+
+    target_peak = seg.max_possible_amplitude * db_to_float(-headroom)
+
+    needed_boost = ratio_to_db(target_peak / peak_sample_val)
+    return seg.apply_gain(needed_boost)
+
+
+@register_pydub_effect
+def speedup(seg, playback_speed=1.5, chunk_size=150, crossfade=25):
+    # we will keep audio in 150ms chunks since one waveform at 20Hz is 50ms long
+    # (20 Hz is the lowest frequency audible to humans)
+
+    # portion of AUDIO TO KEEP: if playback speed is 1.25 we keep 80% (0.8) and
+    # discard 20% (0.2)
+    atk = 1.0 / playback_speed
+
+    if playback_speed < 2.0:
+        # throwing out less than half the audio - keep the chunk size fixed
+        # and remove a smaller piece from each chunk
+        ms_to_remove_per_chunk = int(chunk_size * (1 - atk) / atk)
+    else:
+        # throwing out half the audio or more - keep the removal size fixed
+        # and shrink the chunks we keep
+        ms_to_remove_per_chunk = int(chunk_size)
+        chunk_size = int(atk * chunk_size / (1 - atk))
+
+    # the crossfade cannot be longer than the amount of audio we're removing
+    crossfade = min(crossfade, ms_to_remove_per_chunk - 1)
+
+    # DEBUG
+    #print("chunk: {0}, rm: {1}".format(chunk_size, ms_to_remove_per_chunk))
+
+    chunks = make_chunks(seg, chunk_size + ms_to_remove_per_chunk)
+    if len(chunks) < 2:
+        raise Exception("Could not speed up AudioSegment, it was too short {2:0.2f}s for the current settings:\n{0}ms chunks at {1:0.1f}x speedup".format(
+            chunk_size, playback_speed, seg.duration_seconds))
+
+    # we'll actually truncate a bit less than we calculated to make up for the
+    # crossfade between chunks
+    ms_to_remove_per_chunk -= crossfade
+
+    # we don't want to truncate the last chunk since it is not guaranteed to be
+    # the full chunk length
+    last_chunk = chunks[-1]
+    chunks = [chunk[:-ms_to_remove_per_chunk] for chunk in chunks[:-1]]
+
+    out = chunks[0]
+    for chunk in chunks[1:]:
+        out = out.append(chunk, crossfade=crossfade)
+
+    out += last_chunk
+    return out
+
+
+@register_pydub_effect
+def strip_silence(seg, silence_len=1000, silence_thresh=-16, padding=100):
+    if padding > silence_len:
+        raise InvalidDuration("padding cannot be longer than silence_len")
+
+    chunks = split_on_silence(seg, silence_len, silence_thresh, padding)
+    crossfade = padding / 2
+
+    if not len(chunks):
+        return seg[0:0]
+
+    seg = chunks[0]
+    for chunk in chunks[1:]:
+        seg = seg.append(chunk, crossfade=crossfade)
+
+    return seg
+
+
+@register_pydub_effect
+def compress_dynamic_range(seg, threshold=-20.0, ratio=4.0, attack=5.0, release=50.0):
+    """
+    Keyword Arguments:
+
+        threshold - default: -20.0
+            Threshold in dBFS. default of -20.0 means -20dB relative to the
+            maximum possible volume. 0dBFS is the maximum possible value so
+            all values for this argument should be negative.
+
+        ratio - default: 4.0
+            Compression ratio. Audio louder than the threshold will be
+            reduced to 1/ratio the volume. A ratio of 4.0 is equivalent to
+            a setting of 4:1 in a pro-audio compressor like the Waves C1.
+
+        attack - default: 5.0
+            Attack in milliseconds. How long it should take for the compressor
+            to kick in once the audio has exceeded the threshold.
+
+        release - default: 50.0
+            Release in milliseconds. How long it should take for the compressor
+            to stop compressing after the audio has fallen below the threshold.
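+
+        Example (a sketch; settings are illustrative, not recommendations):
+        >>> tamed = seg.compress_dynamic_range(threshold=-25.0, ratio=3.0,
+        ...                                    attack=10.0, release=100.0)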
+ + + For an overview of Dynamic Range Compression, and more detailed explanation + of the related terminology, see: + + http://en.wikipedia.org/wiki/Dynamic_range_compression + """ + + thresh_rms = seg.max_possible_amplitude * db_to_float(threshold) + + look_frames = int(seg.frame_count(ms=attack)) + def rms_at(frame_i): + return seg.get_sample_slice(frame_i - look_frames, frame_i).rms + def db_over_threshold(rms): + if rms == 0: return 0.0 + db = ratio_to_db(rms / thresh_rms) + return max(db, 0) + + output = [] + + # amount to reduce the volume of the audio by (in dB) + attenuation = 0.0 + + attack_frames = seg.frame_count(ms=attack) + release_frames = seg.frame_count(ms=release) + for i in xrange(int(seg.frame_count())): + rms_now = rms_at(i) + + # with a ratio of 4.0 this means the volume will exceed the threshold by + # 1/4 the amount (of dB) that it would otherwise + max_attenuation = (1 - (1.0 / ratio)) * db_over_threshold(rms_now) + + attenuation_inc = max_attenuation / attack_frames + attenuation_dec = max_attenuation / release_frames + + if rms_now > thresh_rms and attenuation <= max_attenuation: + attenuation += attenuation_inc + attenuation = min(attenuation, max_attenuation) + else: + attenuation -= attenuation_dec + attenuation = max(attenuation, 0) + + frame = seg.get_frame(i) + if attenuation != 0.0: + frame = audioop.mul(frame, + seg.sample_width, + db_to_float(-attenuation)) + + output.append(frame) + + return seg._spawn(data=b''.join(output)) + + +# Invert the phase of the signal. + +@register_pydub_effect + +def invert_phase(seg, channels=(1, 1)): + """ + channels- specifies which channel (left or right) to reverse the phase of. + Note that mono AudioSegments will become stereo. + """ + if channels == (1, 1): + inverted = audioop.mul(seg._data, seg.sample_width, -1.0) + return seg._spawn(data=inverted) + + else: + if seg.channels == 2: + left, right = seg.split_to_mono() + else: + raise Exception("Can't implicitly convert an AudioSegment with " + str(seg.channels) + " channels to stereo.") + + if channels == (1, 0): + left = left.invert_phase() + else: + right = right.invert_phase() + + return seg.from_mono_audiosegments(left, right) + + + +# High and low pass filters based on implementation found on Stack Overflow: +# http://stackoverflow.com/questions/13882038/implementing-simple-high-and-low-pass-filters-in-c + +@register_pydub_effect +def low_pass_filter(seg, cutoff): + """ + cutoff - Frequency (in Hz) where higher frequency signal will begin to + be reduced by 6dB per octave (doubling in frequency) above this point + """ + RC = 1.0 / (cutoff * 2 * math.pi) + dt = 1.0 / seg.frame_rate + + alpha = dt / (RC + dt) + + original = seg.get_array_of_samples() + filteredArray = array.array(seg.array_type, original) + + frame_count = int(seg.frame_count()) + + last_val = [0] * seg.channels + for i in range(seg.channels): + last_val[i] = filteredArray[i] = original[i] + + for i in range(1, frame_count): + for j in range(seg.channels): + offset = (i * seg.channels) + j + last_val[j] = last_val[j] + (alpha * (original[offset] - last_val[j])) + filteredArray[offset] = int(last_val[j]) + + return seg._spawn(data=filteredArray) + + +@register_pydub_effect +def high_pass_filter(seg, cutoff): + """ + cutoff - Frequency (in Hz) where lower frequency signal will begin to + be reduced by 6dB per octave (doubling in frequency) below this point + """ + RC = 1.0 / (cutoff * 2 * math.pi) + dt = 1.0 / seg.frame_rate + + alpha = RC / (RC + dt) + + minval, maxval = 
get_min_max_value(seg.sample_width * 8)
+
+    original = seg.get_array_of_samples()
+    filteredArray = array.array(seg.array_type, original)
+
+    frame_count = int(seg.frame_count())
+
+    last_val = [0] * seg.channels
+    for i in range(seg.channels):
+        last_val[i] = filteredArray[i] = original[i]
+
+    for i in range(1, frame_count):
+        for j in range(seg.channels):
+            offset = (i * seg.channels) + j
+            offset_minus_1 = ((i-1) * seg.channels) + j
+
+            last_val[j] = alpha * (last_val[j] + original[offset] - original[offset_minus_1])
+            filteredArray[offset] = int(min(max(last_val[j], minval), maxval))
+
+    return seg._spawn(data=filteredArray)
+
+
+@register_pydub_effect
+def pan(seg, pan_amount):
+    """
+    pan_amount should be between -1.0 (100% left) and +1.0 (100% right)
+
+    When pan_amount == 0.0 the left/right balance is not changed.
+
+    Panning does not alter the *perceived* loudness, but since loudness
+    is decreasing on one side, the other side needs to get louder to
+    compensate. When panned hard left, the left channel will be 3dB louder.
+    """
+    if not -1.0 <= pan_amount <= 1.0:
+        raise ValueError("pan_amount should be between -1.0 (100% left) and +1.0 (100% right)")
+
+    max_boost_db = ratio_to_db(2.0)
+    boost_db = abs(pan_amount) * max_boost_db
+
+    boost_factor = db_to_float(boost_db)
+    reduce_factor = db_to_float(max_boost_db) - boost_factor
+
+    reduce_db = ratio_to_db(reduce_factor)
+
+    # Cut boost in half (max boost == 3dB) - in reality 2 speakers
+    # do not sum to a full 6 dB.
+    boost_db = boost_db / 2.0
+
+    if pan_amount < 0:
+        return seg.apply_gain_stereo(boost_db, reduce_db)
+    else:
+        return seg.apply_gain_stereo(reduce_db, boost_db)
+
+
+@register_pydub_effect
+def apply_gain_stereo(seg, left_gain=0.0, right_gain=0.0):
+    """
+    left_gain - amount of gain to apply to the left channel (in dB)
+    right_gain - amount of gain to apply to the right channel (in dB)
+
+    note: mono audio segments will be converted to stereo
+    """
+    if seg.channels == 1:
+        left = right = seg
+    elif seg.channels == 2:
+        left, right = seg.split_to_mono()
+
+    l_mult_factor = db_to_float(left_gain)
+    r_mult_factor = db_to_float(right_gain)
+
+    left_data = audioop.mul(left._data, left.sample_width, l_mult_factor)
+    left_data = audioop.tostereo(left_data, left.sample_width, 1, 0)
+
+    right_data = audioop.mul(right._data, right.sample_width, r_mult_factor)
+    right_data = audioop.tostereo(right_data, right.sample_width, 0, 1)
+
+    output = audioop.add(left_data, right_data, seg.sample_width)
+
+    return seg._spawn(data=output,
+                      overrides={'channels': 2,
+                                 'frame_width': 2 * seg.sample_width})
diff --git a/hackaton/lib/python3.12/site-packages/pydub/exceptions.py b/hackaton/lib/python3.12/site-packages/pydub/exceptions.py
new file mode 100644
index 0000000..79d0743
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/pydub/exceptions.py
@@ -0,0 +1,32 @@
+class PydubException(Exception):
+    """
+    Base class for any Pydub exception
+    """
+
+
+class TooManyMissingFrames(PydubException):
+    pass
+
+
+class InvalidDuration(PydubException):
+    pass
+
+
+class InvalidTag(PydubException):
+    pass
+
+
+class InvalidID3TagVersion(PydubException):
+    pass
+
+
+class CouldntDecodeError(PydubException):
+    pass
+
+
+class CouldntEncodeError(PydubException):
+    pass
+
+
+class MissingAudioParameter(PydubException):
+    pass
diff --git a/hackaton/lib/python3.12/site-packages/pydub/generators.py b/hackaton/lib/python3.12/site-packages/pydub/generators.py
new file mode 100644
index 0000000..b04cb4c
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/pydub/generators.py
@@ -0,0 +1,142 @@
+"""
+Each generator will return float samples from -1.0 to 1.0, which can be
+converted to actual audio with 8, 16, 24, or 32 bit depth using the
+SignalGenerator.to_audio_segment() method (on any of its subclasses).
+
+See Wikipedia's "waveform" page for info on some of the generators included
+here: http://en.wikipedia.org/wiki/Waveform
+"""
+
+import math
+import array
+import itertools
+import random
+from .audio_segment import AudioSegment
+from .utils import (
+    db_to_float,
+    get_frame_width,
+    get_array_type,
+    get_min_max_value
+)
+
+
+
+class SignalGenerator(object):
+    def __init__(self, sample_rate=44100, bit_depth=16):
+        self.sample_rate = sample_rate
+        self.bit_depth = bit_depth
+
+    def to_audio_segment(self, duration=1000.0, volume=0.0):
+        """
+        Duration in milliseconds
+            (default: 1 second)
+        Volume in dB relative to maximum amplitude
+            (default 0.0 dBFS, which is the maximum value)
+        """
+        minval, maxval = get_min_max_value(self.bit_depth)
+        sample_width = get_frame_width(self.bit_depth)
+        array_type = get_array_type(self.bit_depth)
+
+        gain = db_to_float(volume)
+        sample_count = int(self.sample_rate * (duration / 1000.0))
+
+        sample_data = (int(val * maxval * gain) for val in self.generate())
+        sample_data = itertools.islice(sample_data, 0, sample_count)
+
+        data = array.array(array_type, sample_data)
+
+        try:
+            data = data.tobytes()
+        except AttributeError:
+            data = data.tostring()
+
+        return AudioSegment(data=data, metadata={
+            "channels": 1,
+            "sample_width": sample_width,
+            "frame_rate": self.sample_rate,
+            "frame_width": sample_width,
+        })
+
+    def generate(self):
+        raise NotImplementedError("SignalGenerator subclasses must implement the generate() method, and *should not* call the superclass implementation.")
+
+
+
+class Sine(SignalGenerator):
+    def __init__(self, freq, **kwargs):
+        super(Sine, self).__init__(**kwargs)
+        self.freq = freq
+
+    def generate(self):
+        sine_of = (self.freq * 2 * math.pi) / self.sample_rate
+        sample_n = 0
+        while True:
+            yield math.sin(sine_of * sample_n)
+            sample_n += 1
+
+
+
+class Pulse(SignalGenerator):
+    def __init__(self, freq, duty_cycle=0.5, **kwargs):
+        super(Pulse, self).__init__(**kwargs)
+        self.freq = freq
+        self.duty_cycle = duty_cycle
+
+    def generate(self):
+        sample_n = 0
+
+        # in samples
+        cycle_length = self.sample_rate / float(self.freq)
+        pulse_length = cycle_length * self.duty_cycle
+
+        while True:
+            if (sample_n % cycle_length) < pulse_length:
+                yield 1.0
+            else:
+                yield -1.0
+            sample_n += 1
+
+
+
+class Square(Pulse):
+    def __init__(self, freq, **kwargs):
+        kwargs['duty_cycle'] = 0.5
+        super(Square, self).__init__(freq, **kwargs)
+
+
+
+class Sawtooth(SignalGenerator):
+    def __init__(self, freq, duty_cycle=1.0, **kwargs):
+        super(Sawtooth, self).__init__(**kwargs)
+        self.freq = freq
+        self.duty_cycle = duty_cycle
+
+    def generate(self):
+        sample_n = 0
+
+        # in samples
+        cycle_length = self.sample_rate / float(self.freq)
+        midpoint = cycle_length * self.duty_cycle
+        ascend_length = midpoint
+        descend_length = cycle_length - ascend_length
+
+        while True:
+            cycle_position = sample_n % cycle_length
+            if cycle_position < midpoint:
+                yield (2 * cycle_position / ascend_length) - 1.0
+            else:
+                yield 1.0 - (2 * (cycle_position - midpoint) / descend_length)
+            sample_n += 1
+
+
+
+class Triangle(Sawtooth):
+    def __init__(self, freq, **kwargs):
+        kwargs['duty_cycle'] = 0.5
+        super(Triangle, self).__init__(freq, **kwargs)
+
+
+class WhiteNoise(SignalGenerator):
+    def generate(self):
+        while True:
+            yield (random.random() * 2) - 1.0
diff --git a/hackaton/lib/python3.12/site-packages/pydub/logging_utils.py b/hackaton/lib/python3.12/site-packages/pydub/logging_utils.py
new file mode 100644
index 0000000..a312bd2
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/pydub/logging_utils.py
@@ -0,0 +1,14 @@
+"""
+Logging helpers for pydub's ffmpeg/avconv subprocess calls.
+"""
+import logging
+
+converter_logger = logging.getLogger("pydub.converter")
+
+def log_conversion(conversion_command):
+    converter_logger.debug("subprocess.call(%s)", repr(conversion_command))
+
+def log_subprocess_output(output):
+    if output:
+        for line in output.rstrip().splitlines():
+            converter_logger.debug('subprocess output: %s', line.rstrip())
diff --git a/hackaton/lib/python3.12/site-packages/pydub/playback.py b/hackaton/lib/python3.12/site-packages/pydub/playback.py
new file mode 100644
index 0000000..72ce4a5
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/pydub/playback.py
@@ -0,0 +1,71 @@
+"""
+Support for playing AudioSegments. Simpleaudio will be used if it's installed,
+then pyaudio, otherwise playback falls back to ffplay. Pyaudio is a *much*
+nicer solution than ffplay, but is tricky to install. See my notes on
+installing pyaudio in a virtualenv (on OSX 10.10): https://gist.github.com/jiaaro/9767512210a1d80a8a0d
+"""
+
+import subprocess
+from tempfile import NamedTemporaryFile
+from .utils import get_player_name, make_chunks
+
+def _play_with_ffplay(seg):
+    PLAYER = get_player_name()
+    with NamedTemporaryFile("w+b", suffix=".wav") as f:
+        seg.export(f.name, "wav")
+        subprocess.call([PLAYER, "-nodisp", "-autoexit", "-hide_banner", f.name])
+
+
+def _play_with_pyaudio(seg):
+    import pyaudio
+
+    p = pyaudio.PyAudio()
+    stream = p.open(format=p.get_format_from_width(seg.sample_width),
+                    channels=seg.channels,
+                    rate=seg.frame_rate,
+                    output=True)
+
+    # Just in case there were any exceptions/interrupts, we release the resource
+    # So as not to raise OSError: Device Unavailable should play() be used again
+    try:
+        # break audio into half-second chunks (to allow keyboard interrupts)
+        for chunk in make_chunks(seg, 500):
+            stream.write(chunk._data)
+    finally:
+        stream.stop_stream()
+        stream.close()
+
+        p.terminate()
+
+
+def _play_with_simpleaudio(seg):
+    import simpleaudio
+    return simpleaudio.play_buffer(
+        seg.raw_data,
+        num_channels=seg.channels,
+        bytes_per_sample=seg.sample_width,
+        sample_rate=seg.frame_rate
+    )
+
+
+def play(audio_segment):
+    try:
+        playback = _play_with_simpleaudio(audio_segment)
+        try:
+            playback.wait_done()
+        except KeyboardInterrupt:
+            playback.stop()
+    except ImportError:
+        pass
+    else:
+        return
+
+    try:
+        _play_with_pyaudio(audio_segment)
+        return
+    except ImportError:
+        pass
+    else:
+        return
+
+    _play_with_ffplay(audio_segment)
diff --git a/hackaton/lib/python3.12/site-packages/pydub/pyaudioop.py b/hackaton/lib/python3.12/site-packages/pydub/pyaudioop.py
new file mode 100644
index 0000000..9b1e2fb
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/pydub/pyaudioop.py
@@ -0,0 +1,553 @@
+try:
+    from __builtin__ import max as builtin_max
+    from __builtin__ import min as builtin_min
+except ImportError:
+    from builtins import max as builtin_max
+    from builtins import min as builtin_min
+import math
+import struct
+try:
+    from fractions import gcd
+except ImportError:  # Python 3.9+
+    from math import gcd
+from ctypes import create_string_buffer
+
+
+class error(Exception):
+    pass
+
+
+def _check_size(size):
+    if size != 1 and size != 2 and size != 4:
+        raise error("Size should be 1, 2 or 4")
+
+
+def _check_params(length,
size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def _sample_count(cp, size): + return len(cp) / size + + +def _get_samples(cp, size, signed=True): + for i in range(_sample_count(cp, size)): + yield _get_sample(cp, size, i, signed) + + +def _struct_format(size, signed): + if size == 1: + return "b" if signed else "B" + elif size == 2: + return "h" if signed else "H" + elif size == 4: + return "i" if signed else "I" + + +def _get_sample(cp, size, i, signed=True): + fmt = _struct_format(size, signed) + start = i * size + end = start + size + return struct.unpack_from(fmt, buffer(cp)[start:end])[0] + + +def _put_sample(cp, size, i, val, signed=True): + fmt = _struct_format(size, signed) + struct.pack_into(fmt, cp, i * size, val) + + +def _get_maxval(size, signed=True): + if signed and size == 1: + return 0x7f + elif size == 1: + return 0xff + elif signed and size == 2: + return 0x7fff + elif size == 2: + return 0xffff + elif signed and size == 4: + return 0x7fffffff + elif size == 4: + return 0xffffffff + + +def _get_minval(size, signed=True): + if not signed: + return 0 + elif size == 1: + return -0x80 + elif size == 2: + return -0x8000 + elif size == 4: + return -0x80000000 + + +def _get_clipfn(size, signed=True): + maxval = _get_maxval(size, signed) + minval = _get_minval(size, signed) + return lambda val: builtin_max(min(val, maxval), minval) + + +def _overflow(val, size, signed=True): + minval = _get_minval(size, signed) + maxval = _get_maxval(size, signed) + if minval <= val <= maxval: + return val + + bits = size * 8 + if signed: + offset = 2**(bits-1) + return ((val + offset) % (2**bits)) - offset + else: + return val % (2**bits) + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + return _get_sample(cp, size, i) + + +def max(cp, size): + _check_params(len(cp), size) + + if len(cp) == 0: + return 0 + + return builtin_max(abs(sample) for sample in _get_samples(cp, size)) + + +def minmax(cp, size): + _check_params(len(cp), size) + + max_sample, min_sample = 0, 0 + for sample in _get_samples(cp, size): + max_sample = builtin_max(sample, max_sample) + min_sample = builtin_min(sample, min_sample) + + return min_sample, max_sample + + +def avg(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + return sum(_get_samples(cp, size)) / sample_count + + +def rms(cp, size): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + + sum_squares = sum(sample**2 for sample in _get_samples(cp, size)) + return int(math.sqrt(sum_squares / sample_count)) + + +def _sum2(cp1, cp2, length): + size = 2 + total = 0 + for i in range(length): + total += getsample(cp1, size, i) * getsample(cp2, size, i) + return total + + +def findfit(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0 or len(cp2) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) < len(cp2): + raise error("First sample should be longer") + + len1 = _sample_count(cp1, size) + len2 = _sample_count(cp2, size) + + sum_ri_2 = _sum2(cp2, cp2, len2) + sum_aij_2 = _sum2(cp1, cp1, len2) + sum_aij_ri = _sum2(cp1, cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + best_result = result + best_i = 0 + + for i in range(1, len1 - len2 + 1): + aj_m1 = _get_sample(cp1, size, i - 1) + aj_lm1 = _get_sample(cp1, size, i + len2 - 1) + + sum_aij_2 += 
aj_lm1**2 - aj_m1**2 + sum_aij_ri = _sum2(buffer(cp1)[i*size:], cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + if result < best_result: + best_result = result + best_i = i + + factor = _sum2(buffer(cp1)[best_i*size:], cp2, len2) / sum_ri_2 + + return best_i, factor + + +def findfactor(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) != len(cp2): + raise error("Samples should be same size") + + sample_count = _sample_count(cp1, size) + + sum_ri_2 = _sum2(cp2, cp2, sample_count) + sum_aij_ri = _sum2(cp1, cp2, sample_count) + + return sum_aij_ri / sum_ri_2 + + +def findmax(cp, len2): + size = 2 + sample_count = _sample_count(cp, size) + + if len(cp) % 2 != 0: + raise error("Strings should be even-sized") + + if len2 < 0 or sample_count < len2: + raise error("Input sample should be longer") + + if sample_count == 0: + return 0 + + result = _sum2(cp, cp, len2) + best_result = result + best_i = 0 + + for i in range(1, sample_count - len2 + 1): + sample_leaving_window = getsample(cp, size, i - 1) + sample_entering_window = getsample(cp, size, i + len2 - 1) + + result -= sample_leaving_window**2 + result += sample_entering_window**2 + + if result > best_result: + best_result = result + best_i = i + + return best_i + + +def avgpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + prevextreme = None + avg = 0 + nextreme = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + avg += abs(prevval - prevextreme) + nextreme += 1 + + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + if nextreme == 0: + return 0 + + return avg / nextreme + + +def maxpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + prevextreme = None + max = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + extremediff = abs(prevval - prevextreme) + if extremediff > max: + max = extremediff + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + return max + + +def cross(cp, size): + _check_params(len(cp), size) + + crossings = 0 + last_sample = 0 + for sample in _get_samples(cp, size): + if sample <= 0 < last_sample or sample >= 0 > last_sample: + crossings += 1 + last_sample = sample + + return crossings + + +def mul(cp, size, factor): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = clip(int(sample * factor)) + _put_sample(result, size, i, sample) + + return result.raw + + +def tomono(cp, size, fac1, fac2): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) / 2) + + for i in range(0, sample_count, 2): + l_sample = getsample(cp, size, i) + r_sample = getsample(cp, size, i + 1) + + sample = (l_sample * fac1) + (r_sample * fac2) + sample = clip(sample) + + _put_sample(result, size, i / 2, sample) + + return result.raw + + +def 
tostereo(cp, size, fac1, fac2): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) * 2) + clip = _get_clipfn(size) + + for i in range(sample_count): + sample = _get_sample(cp, size, i) + + l_sample = clip(sample * fac1) + r_sample = clip(sample * fac2) + + _put_sample(result, size, i * 2, l_sample) + _put_sample(result, size, i * 2 + 1, r_sample) + + return result.raw + + +def add(cp1, cp2, size): + _check_params(len(cp1), size) + + if len(cp1) != len(cp2): + raise error("Lengths should be the same") + + clip = _get_clipfn(size) + sample_count = _sample_count(cp1, size) + result = create_string_buffer(len(cp1)) + + for i in range(sample_count): + sample1 = getsample(cp1, size, i) + sample2 = getsample(cp2, size, i) + + sample = clip(sample1 + sample2) + + _put_sample(result, size, i, sample) + + return result.raw + + +def bias(cp, size, bias): + _check_params(len(cp), size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = _overflow(sample + bias, size) + _put_sample(result, size, i, sample) + + return result.raw + + +def reverse(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp)) + for i, sample in enumerate(_get_samples(cp, size)): + _put_sample(result, size, sample_count - i - 1, sample) + + return result.raw + + +def lin2lin(cp, size, size2): + _check_params(len(cp), size) + _check_size(size2) + + if size == size2: + return cp + + new_len = (len(cp) / size) * size2 + + result = create_string_buffer(new_len) + + for i in range(_sample_count(cp, size)): + sample = _get_sample(cp, size, i) + if size < size2: + sample = sample << (4 * size2 / size) + elif size > size2: + sample = sample >> (4 * size / size2) + + sample = _overflow(sample, size2) + + _put_sample(result, size2, i, sample) + + return result.raw + + +def ratecv(cp, size, nchannels, inrate, outrate, state, weightA=1, weightB=0): + _check_params(len(cp), size) + if nchannels < 1: + raise error("# of channels should be >= 1") + + bytes_per_frame = size * nchannels + frame_count = len(cp) / bytes_per_frame + + if bytes_per_frame / nchannels != size: + raise OverflowError("width * nchannels too big for a C int") + + if weightA < 1 or weightB < 0: + raise error("weightA should be >= 1, weightB should be >= 0") + + if len(cp) % bytes_per_frame != 0: + raise error("not a whole number of frames") + + if inrate <= 0 or outrate <= 0: + raise error("sampling rate not > 0") + + d = gcd(inrate, outrate) + inrate /= d + outrate /= d + + prev_i = [0] * nchannels + cur_i = [0] * nchannels + + if state is None: + d = -outrate + else: + d, samps = state + + if len(samps) != nchannels: + raise error("illegal state argument") + + prev_i, cur_i = zip(*samps) + prev_i, cur_i = list(prev_i), list(cur_i) + + q = frame_count / inrate + ceiling = (q + 1) * outrate + nbytes = ceiling * bytes_per_frame + + result = create_string_buffer(nbytes) + + samples = _get_samples(cp, size) + out_i = 0 + while True: + while d < 0: + if frame_count == 0: + samps = zip(prev_i, cur_i) + retval = result.raw + + # slice off extra bytes + trim_index = (out_i * bytes_per_frame) - len(retval) + retval = buffer(retval)[:trim_index] + + return (retval, (d, tuple(samps))) + + for chan in range(nchannels): + prev_i[chan] = cur_i[chan] + cur_i[chan] = samples.next() + + cur_i[chan] = ( + (weightA * cur_i[chan] + weightB * prev_i[chan]) + / (weightA + weightB) + ) + + frame_count -= 1 
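+            # (d is the resampler's running error term: consuming an input
+            # frame adds outrate below, while emitting an output frame in the
+            # loop that follows subtracts inrate, keeping reads and writes in
+            # the inrate:outrate proportion.)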
+            d += outrate
+
+        while d >= 0:
+            for chan in range(nchannels):
+                cur_o = (
+                    (prev_i[chan] * d + cur_i[chan] * (outrate - d))
+                    / outrate
+                )
+                _put_sample(result, size, out_i, _overflow(cur_o, size))
+                out_i += 1
+            d -= inrate
+
+
+def lin2ulaw(cp, size):
+    raise NotImplementedError()
+
+
+def ulaw2lin(cp, size):
+    raise NotImplementedError()
+
+
+def lin2alaw(cp, size):
+    raise NotImplementedError()
+
+
+def alaw2lin(cp, size):
+    raise NotImplementedError()
+
+
+def lin2adpcm(cp, size, state):
+    raise NotImplementedError()
+
+
+def adpcm2lin(cp, size, state):
+    raise NotImplementedError()
diff --git a/hackaton/lib/python3.12/site-packages/pydub/scipy_effects.py b/hackaton/lib/python3.12/site-packages/pydub/scipy_effects.py
new file mode 100644
index 0000000..abab2b4
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/pydub/scipy_effects.py
@@ -0,0 +1,175 @@
+"""
+This module provides scipy versions of high_pass_filter and low_pass_filter
+as well as an additional band_pass_filter.
+
+Of course, you will need to install scipy for these to work.
+
+When this module is imported the high and low pass filters from this module
+will be used when calling audio_segment.high_pass_filter() and
+audio_segment.low_pass_filter() instead of the slower, less powerful versions
+provided by pydub.effects.
+"""
+from scipy.signal import butter, sosfilt
+from .audio_segment import AudioSegment  # needed by eq() below
+from .utils import (register_pydub_effect, stereo_to_ms, ms_to_stereo)
+
+
+def _mk_butter_filter(freq, type, order):
+    """
+    Args:
+        freq: The cutoff frequency for highpass and lowpass filters. For
+            band filters, a list of [low_cutoff, high_cutoff]
+        type: "lowpass", "highpass", or "band"
+        order: nth order Butterworth filter (default: 5th order). The
+            attenuation is -6dB/octave beyond the cutoff frequency (for 1st
+            order). A higher order filter will have more attenuation, each level
+            adding an additional -6dB (so a 3rd order Butterworth filter would
+            be -18dB/octave).
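+
+        Example (a sketch; cutoff values are illustrative):
+        >>> bp = _mk_butter_filter([300, 3000], 'band', order=5)
+        >>> filtered_left = bp(seg.split_to_mono()[0])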
+
+    Returns:
+        function which can filter a mono audio segment
+
+    """
+    def filter_fn(seg):
+        assert seg.channels == 1
+
+        nyq = 0.5 * seg.frame_rate
+        try:
+            freqs = [f / nyq for f in freq]
+        except TypeError:
+            freqs = freq / nyq
+
+        sos = butter(order, freqs, btype=type, output='sos')
+        y = sosfilt(sos, seg.get_array_of_samples())
+
+        return seg._spawn(y.astype(seg.array_type))
+
+    return filter_fn
+
+
+@register_pydub_effect
+def band_pass_filter(seg, low_cutoff_freq, high_cutoff_freq, order=5):
+    filter_fn = _mk_butter_filter([low_cutoff_freq, high_cutoff_freq], 'band', order=order)
+    return seg.apply_mono_filter_to_each_channel(filter_fn)
+
+
+@register_pydub_effect
+def high_pass_filter(seg, cutoff_freq, order=5):
+    filter_fn = _mk_butter_filter(cutoff_freq, 'highpass', order=order)
+    return seg.apply_mono_filter_to_each_channel(filter_fn)
+
+
+@register_pydub_effect
+def low_pass_filter(seg, cutoff_freq, order=5):
+    filter_fn = _mk_butter_filter(cutoff_freq, 'lowpass', order=order)
+    return seg.apply_mono_filter_to_each_channel(filter_fn)
+
+
+@register_pydub_effect
+def _eq(seg, focus_freq, bandwidth=100, mode="peak", gain_dB=0, order=2):
+    """
+    Args:
+        focus_freq - middle frequency or known frequency of band (in Hz)
+        bandwidth - range of the equalizer band
+        mode - Mode of Equalization (Peak/Notch (Bell Curve), High Shelf, Low Shelf)
+        gain_dB - gain to apply to the band (in dB); negative values cut
+        order - Rolloff factor (1 = 6dB/Octave, 2 = 12dB/Octave)
+
+    Returns:
+        Equalized/Filtered AudioSegment
+    """
+    filt_mode = ["peak", "low_shelf", "high_shelf"]
+    if mode not in filt_mode:
+        raise ValueError("Incorrect Mode Selection")
+
+    if gain_dB >= 0:
+        if mode == "peak":
+            sec = band_pass_filter(seg, focus_freq - bandwidth/2, focus_freq + bandwidth/2, order=order)
+            seg = seg.overlay(sec - (3 - gain_dB))
+            return seg
+
+        if mode == "low_shelf":
+            sec = low_pass_filter(seg, focus_freq, order=order)
+            seg = seg.overlay(sec - (3 - gain_dB))
+            return seg
+
+        if mode == "high_shelf":
+            sec = high_pass_filter(seg, focus_freq, order=order)
+            seg = seg.overlay(sec - (3 - gain_dB))
+            return seg
+
+    if gain_dB < 0:
+        if mode == "peak":
+            sec = high_pass_filter(seg, focus_freq - bandwidth/2, order=order)
+            seg = seg.overlay(sec - (3 + gain_dB)) + gain_dB
+            sec = low_pass_filter(seg, focus_freq + bandwidth/2, order=order)
+            seg = seg.overlay(sec - (3 + gain_dB)) + gain_dB
+            return seg
+
+        if mode == "low_shelf":
+            sec = high_pass_filter(seg, focus_freq, order=order)
+            seg = seg.overlay(sec - (3 + gain_dB)) + gain_dB
+            return seg
+
+        if mode == "high_shelf":
+            sec = low_pass_filter(seg, focus_freq, order=order)
+            seg = seg.overlay(sec - (3 + gain_dB)) + gain_dB
+            return seg
+
+
+@register_pydub_effect
+def eq(seg, focus_freq, bandwidth=100, channel_mode="L+R", filter_mode="peak", gain_dB=0, order=2):
+    """
+    Args:
+        focus_freq - middle frequency or known frequency of band (in Hz)
+        bandwidth - range of the equalizer band
+        channel_mode - Select Channels to be affected by the filter.
+            L+R - Standard Stereo Filter
+            L - Only Left Channel is Filtered
+            R - Only Right Channel is Filtered
+            M+S - Blumlein Stereo Filter (Mid-Side)
+            M - Only Mid Channel is Filtered
+            S - Only Side Channel is Filtered
+            Mono Audio Segments are completely filtered.
+        filter_mode - Mode of Equalization (Peak/Notch (Bell Curve), High Shelf, Low Shelf)
+        order - Rolloff factor (1 = 6dB/Octave, 2 = 12dB/Octave)
+
+    Returns:
+        Equalized/Filtered AudioSegment
+    """
+    channel_modes = ["L+R", "M+S", "L", "R", "M", "S"]
+    if channel_mode not in channel_modes:
+        raise ValueError("Incorrect Channel Mode Selection")
+
+    if seg.channels == 1:
+        return _eq(seg, focus_freq, bandwidth, filter_mode, gain_dB, order)
+
+    if channel_mode == "L+R":
+        return _eq(seg, focus_freq, bandwidth, filter_mode, gain_dB, order)
+
+    if channel_mode == "L":
+        seg = seg.split_to_mono()
+        seg = [_eq(seg[0], focus_freq, bandwidth, filter_mode, gain_dB, order), seg[1]]
+        return AudioSegment.from_mono_audiosegments(seg[0], seg[1])
+
+    if channel_mode == "R":
+        seg = seg.split_to_mono()
+        seg = [seg[0], _eq(seg[1], focus_freq, bandwidth, filter_mode, gain_dB, order)]
+        return AudioSegment.from_mono_audiosegments(seg[0], seg[1])
+
+    if channel_mode == "M+S":
+        seg = stereo_to_ms(seg)
+        seg = _eq(seg, focus_freq, bandwidth, filter_mode, gain_dB, order)
+        return ms_to_stereo(seg)
+
+    if channel_mode == "M":
+        seg = stereo_to_ms(seg).split_to_mono()
+        seg = [_eq(seg[0], focus_freq, bandwidth, filter_mode, gain_dB, order), seg[1]]
+        seg = AudioSegment.from_mono_audiosegments(seg[0], seg[1])
+        return ms_to_stereo(seg)
+
+    if channel_mode == "S":
+        seg = stereo_to_ms(seg).split_to_mono()
+        seg = [seg[0], _eq(seg[1], focus_freq, bandwidth, filter_mode, gain_dB, order)]
+        seg = AudioSegment.from_mono_audiosegments(seg[0], seg[1])
+        return ms_to_stereo(seg)
+
+
diff --git a/hackaton/lib/python3.12/site-packages/pydub/silence.py b/hackaton/lib/python3.12/site-packages/pydub/silence.py
new file mode 100644
index 0000000..0ad1499
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/pydub/silence.py
@@ -0,0 +1,182 @@
+"""
+Various functions for finding/manipulating silence in AudioSegments
+"""
+import itertools
+
+from .utils import db_to_float
+
+
+def detect_silence(audio_segment, min_silence_len=1000, silence_thresh=-16, seek_step=1):
+    """
+    Returns a list of all silent sections [start, end] in milliseconds of audio_segment.
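+
+    Example (a sketch; the threshold suits fairly quiet room tone):
+    >>> from pydub import AudioSegment
+    >>> seg = AudioSegment.from_file("speech.wav")   # illustrative filename
+    >>> silent_ranges = detect_silence(seg, min_silence_len=500, silence_thresh=-40)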
+    Inverse of detect_nonsilent()
+
+    audio_segment - the segment to find silence in
+    min_silence_len - the minimum length for any silent section
+    silence_thresh - the upper bound for how quiet is silent in dBFS
+    seek_step - step size for iterating over the segment in ms
+    """
+    seg_len = len(audio_segment)
+
+    # you can't have a silent portion of a sound that is longer than the sound
+    if seg_len < min_silence_len:
+        return []
+
+    # convert silence threshold to a float value (so we can compare it to rms)
+    silence_thresh = db_to_float(silence_thresh) * audio_segment.max_possible_amplitude
+
+    # find silence and add start and end indices to the silence_starts list
+    silence_starts = []
+
+    # check successive (1 sec by default) chunk of sound for silence
+    # try a chunk at every "seek step" (or every chunk for a seek step == 1)
+    last_slice_start = seg_len - min_silence_len
+    slice_starts = range(0, last_slice_start + 1, seek_step)
+
+    # guarantee last_slice_start is included in the range
+    # to make sure the last portion of the audio is searched
+    if last_slice_start % seek_step:
+        slice_starts = itertools.chain(slice_starts, [last_slice_start])
+
+    for i in slice_starts:
+        audio_slice = audio_segment[i:i + min_silence_len]
+        if audio_slice.rms <= silence_thresh:
+            silence_starts.append(i)
+
+    # short circuit when there is no silence
+    if not silence_starts:
+        return []
+
+    # combine the silence we detected into ranges (start ms - end ms)
+    silent_ranges = []
+
+    prev_i = silence_starts.pop(0)
+    current_range_start = prev_i
+
+    for silence_start_i in silence_starts:
+        continuous = (silence_start_i == prev_i + seek_step)
+
+        # sometimes two small blips are enough for one particular slice to be
+        # non-silent, despite the silence all running together. Just combine
+        # the two overlapping silent ranges.
+        silence_has_gap = silence_start_i > (prev_i + min_silence_len)
+
+        if not continuous and silence_has_gap:
+            silent_ranges.append([current_range_start,
+                                  prev_i + min_silence_len])
+            current_range_start = silence_start_i
+        prev_i = silence_start_i
+
+    silent_ranges.append([current_range_start,
+                          prev_i + min_silence_len])
+
+    return silent_ranges
+
+
+def detect_nonsilent(audio_segment, min_silence_len=1000, silence_thresh=-16, seek_step=1):
+    """
+    Returns a list of all nonsilent sections [start, end] in milliseconds of audio_segment.
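+
+    Example (a sketch, mirroring the detect_silence example above):
+    >>> speech_ranges = detect_nonsilent(seg, min_silence_len=500, silence_thresh=-40)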
+    Inverse of detect_silence()
+
+    audio_segment - the segment to find silence in
+    min_silence_len - the minimum length for any silent section
+    silence_thresh - the upper bound for how quiet is silent in dBFS
+    seek_step - step size for iterating over the segment in ms
+    """
+    silent_ranges = detect_silence(audio_segment, min_silence_len, silence_thresh, seek_step)
+    len_seg = len(audio_segment)
+
+    # if there is no silence, the whole thing is nonsilent
+    if not silent_ranges:
+        return [[0, len_seg]]
+
+    # short circuit when the whole audio segment is silent
+    if silent_ranges[0][0] == 0 and silent_ranges[0][1] == len_seg:
+        return []
+
+    prev_end_i = 0
+    nonsilent_ranges = []
+    for start_i, end_i in silent_ranges:
+        nonsilent_ranges.append([prev_end_i, start_i])
+        prev_end_i = end_i
+
+    if end_i != len_seg:
+        nonsilent_ranges.append([prev_end_i, len_seg])
+
+    if nonsilent_ranges[0] == [0, 0]:
+        nonsilent_ranges.pop(0)
+
+    return nonsilent_ranges
+
+
+def split_on_silence(audio_segment, min_silence_len=1000, silence_thresh=-16, keep_silence=100,
+                     seek_step=1):
+    """
+    Returns list of audio segments from splitting audio_segment on silent sections
+
+    audio_segment - original pydub.AudioSegment() object
+
+    min_silence_len - (in ms) minimum length of a silence to be used for
+        a split. default: 1000ms
+
+    silence_thresh - (in dBFS) anything quieter than this will be
+        considered silence. default: -16dBFS
+
+    keep_silence - (in ms or True/False) leave some silence at the beginning
+        and end of the chunks. Keeps the sound from sounding like it
+        is abruptly cut off.
+        When the length of the silence is less than the keep_silence duration
+        it is split evenly between the preceding and following non-silent
+        segments.
+        If True is specified, all the silence is kept, if False none is kept.
+        default: 100ms
+
+    seek_step - step size for iterating over the segment in ms
+    """
+
+    # from the itertools documentation
+    def pairwise(iterable):
+        "s -> (s0,s1), (s1,s2), (s2, s3), ..."
+        a, b = itertools.tee(iterable)
+        next(b, None)
+        return zip(a, b)
+
+    if isinstance(keep_silence, bool):
+        keep_silence = len(audio_segment) if keep_silence else 0
+
+    output_ranges = [
+        [ start - keep_silence, end + keep_silence ]
+        for (start,end)
+            in detect_nonsilent(audio_segment, min_silence_len, silence_thresh, seek_step)
+    ]
+
+    for range_i, range_ii in pairwise(output_ranges):
+        last_end = range_i[1]
+        next_start = range_ii[0]
+        if next_start < last_end:
+            range_i[1] = (last_end+next_start)//2
+            range_ii[0] = range_i[1]
+
+    return [
+        audio_segment[ max(start,0) : min(end,len(audio_segment)) ]
+        for start,end in output_ranges
+    ]
+
+
+def detect_leading_silence(sound, silence_threshold=-50.0, chunk_size=10):
+    """
+    Returns the millisecond/index that the leading silence ends.
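+
+    Example (a sketch): trim the leading silence off a segment:
+    >>> start_trim = detect_leading_silence(sound)
+    >>> trimmed = sound[start_trim:]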
+
+    sound - the segment to find silence in
+    silence_threshold - the upper bound for how quiet is silent in dBFS
+    chunk_size - chunk size for iterating over the segment in ms
+    """
+    trim_ms = 0  # ms
+    assert chunk_size > 0  # to avoid infinite loop
+    while sound[trim_ms:trim_ms+chunk_size].dBFS < silence_threshold and trim_ms < len(sound):
+        trim_ms += chunk_size
+
+    # if there is no end it should return the length of the segment
+    return min(trim_ms, len(sound))
+
+
diff --git a/hackaton/lib/python3.12/site-packages/pydub/utils.py b/hackaton/lib/python3.12/site-packages/pydub/utils.py
new file mode 100644
index 0000000..740c500
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/pydub/utils.py
@@ -0,0 +1,434 @@
+from __future__ import division
+
+import json
+import os
+import re
+import sys
+from subprocess import Popen, PIPE
+from math import log, ceil
+from tempfile import TemporaryFile
+from warnings import warn
+from functools import wraps
+
+try:
+    import audioop
+except ImportError:
+    import pyaudioop as audioop
+
+if sys.version_info >= (3, 0):
+    basestring = str
+
+FRAME_WIDTHS = {
+    8: 1,
+    16: 2,
+    32: 4,
+}
+ARRAY_TYPES = {
+    8: "b",
+    16: "h",
+    32: "i",
+}
+ARRAY_RANGES = {
+    8: (-0x80, 0x7f),
+    16: (-0x8000, 0x7fff),
+    32: (-0x80000000, 0x7fffffff),
+}
+
+
+def get_frame_width(bit_depth):
+    return FRAME_WIDTHS[bit_depth]
+
+
+def get_array_type(bit_depth, signed=True):
+    t = ARRAY_TYPES[bit_depth]
+    if not signed:
+        t = t.upper()
+    return t
+
+
+def get_min_max_value(bit_depth):
+    return ARRAY_RANGES[bit_depth]
+
+
+def _fd_or_path_or_tempfile(fd, mode='w+b', tempfile=True):
+    close_fd = False
+    if fd is None and tempfile:
+        fd = TemporaryFile(mode=mode)
+        close_fd = True
+
+    if isinstance(fd, basestring):
+        fd = open(fd, mode=mode)
+        close_fd = True
+
+    try:
+        if isinstance(fd, os.PathLike):
+            fd = open(fd, mode=mode)
+            close_fd = True
+    except AttributeError:
+        # module os has no attribute PathLike, so we're on python < 3.6.
+        # The protocol we're trying to support doesn't exist, so just pass.
+        pass
+
+    return fd, close_fd
+
+
+def db_to_float(db, using_amplitude=True):
+    """
+    Converts the input db to a float, which represents the equivalent
+    ratio in amplitude (or in power, when using_amplitude=False).
+    """
+    db = float(db)
+    if using_amplitude:
+        return 10 ** (db / 20)
+    else:  # using power
+        return 10 ** (db / 10)
+
+
+def ratio_to_db(ratio, val2=None, using_amplitude=True):
+    """
+    Converts the input float to db, which represents the equivalent
+    level change for the amplitude (or power) ratio passed in.
+    """
+    ratio = float(ratio)
+
+    # accept 2 values and use the ratio of val1 to val2
+    if val2 is not None:
+        ratio = ratio / val2
+
+    # special case for multiply-by-zero (convert to silence)
+    if ratio == 0:
+        return -float('inf')
+
+    if using_amplitude:
+        return 20 * log(ratio, 10)
+    else:  # using power
+        return 10 * log(ratio, 10)
+
+
+def register_pydub_effect(fn, name=None):
+    """
+    decorator for adding pydub effects to the AudioSegment objects.
+
+    example use:
+        @register_pydub_effect
+        def normalize(audio_segment):
+            ...
+
+    or you can specify a name:
+        @register_pydub_effect("normalize")
+        def normalize_audio_segment(audio_segment):
+            ...
+ """ + if isinstance(fn, basestring): + name = fn + return lambda fn: register_pydub_effect(fn, name) + + if name is None: + name = fn.__name__ + + from .audio_segment import AudioSegment + setattr(AudioSegment, name, fn) + return fn + + +def make_chunks(audio_segment, chunk_length): + """ + Breaks an AudioSegment into chunks that are milliseconds + long. + if chunk_length is 50 then you'll get a list of 50 millisecond long audio + segments back (except the last one, which can be shorter) + """ + number_of_chunks = ceil(len(audio_segment) / float(chunk_length)) + return [audio_segment[i * chunk_length:(i + 1) * chunk_length] + for i in range(int(number_of_chunks))] + + +def which(program): + """ + Mimics behavior of UNIX which command. + """ + # Add .exe program extension for windows support + if os.name == "nt" and not program.endswith(".exe"): + program += ".exe" + + envdir_list = [os.curdir] + os.environ["PATH"].split(os.pathsep) + + for envdir in envdir_list: + program_path = os.path.join(envdir, program) + if os.path.isfile(program_path) and os.access(program_path, os.X_OK): + return program_path + + +def get_encoder_name(): + """ + Return enconder default application for system, either avconv or ffmpeg + """ + if which("avconv"): + return "avconv" + elif which("ffmpeg"): + return "ffmpeg" + else: + # should raise exception + warn("Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work", RuntimeWarning) + return "ffmpeg" + + +def get_player_name(): + """ + Return enconder default application for system, either avconv or ffmpeg + """ + if which("avplay"): + return "avplay" + elif which("ffplay"): + return "ffplay" + else: + # should raise exception + warn("Couldn't find ffplay or avplay - defaulting to ffplay, but may not work", RuntimeWarning) + return "ffplay" + + +def get_prober_name(): + """ + Return probe application, either avconv or ffmpeg + """ + if which("avprobe"): + return "avprobe" + elif which("ffprobe"): + return "ffprobe" + else: + # should raise exception + warn("Couldn't find ffprobe or avprobe - defaulting to ffprobe, but may not work", RuntimeWarning) + return "ffprobe" + + +def fsdecode(filename): + """Wrapper for os.fsdecode which was introduced in python 3.2 .""" + + if sys.version_info >= (3, 2): + PathLikeTypes = (basestring, bytes) + if sys.version_info >= (3, 6): + PathLikeTypes += (os.PathLike,) + if isinstance(filename, PathLikeTypes): + return os.fsdecode(filename) + else: + if isinstance(filename, bytes): + return filename.decode(sys.getfilesystemencoding()) + if isinstance(filename, basestring): + return filename + + raise TypeError("type {0} not accepted by fsdecode".format(type(filename))) + + +def get_extra_info(stderr): + """ + avprobe sometimes gives more information on stderr than + on the json output. The information has to be extracted + from stderr of the format of: + ' Stream #0:0: Audio: flac, 88200 Hz, stereo, s32 (24 bit)' + or (macOS version): + ' Stream #0:0: Audio: vorbis' + ' 44100 Hz, stereo, fltp, 320 kb/s' + + :type stderr: str + :rtype: list of dict + """ + extra_info = {} + + re_stream = r'(?P +)Stream #0[:\.](?P([0-9]+))(?P.+)\n?(?! *Stream)((?P +)(?P.+))?' 
+ for i in re.finditer(re_stream, stderr): + if i.group('space_end') is not None and len(i.group('space_start')) <= len( + i.group('space_end')): + content_line = ','.join([i.group('content_0'), i.group('content_1')]) + else: + content_line = i.group('content_0') + tokens = [x.strip() for x in re.split('[:,]', content_line) if x] + extra_info[int(i.group('stream_id'))] = tokens + return extra_info + + +def mediainfo_json(filepath, read_ahead_limit=-1): + """Return json dictionary with media info(codec, duration, size, bitrate...) from filepath + """ + prober = get_prober_name() + command_args = [ + "-v", "info", + "-show_format", + "-show_streams", + ] + try: + command_args += [fsdecode(filepath)] + stdin_parameter = None + stdin_data = None + except TypeError: + if prober == 'ffprobe': + command_args += ["-read_ahead_limit", str(read_ahead_limit), + "cache:pipe:0"] + else: + command_args += ["-"] + stdin_parameter = PIPE + file, close_file = _fd_or_path_or_tempfile(filepath, 'rb', tempfile=False) + file.seek(0) + stdin_data = file.read() + if close_file: + file.close() + + command = [prober, '-of', 'json'] + command_args + res = Popen(command, stdin=stdin_parameter, stdout=PIPE, stderr=PIPE) + output, stderr = res.communicate(input=stdin_data) + output = output.decode("utf-8", 'ignore') + stderr = stderr.decode("utf-8", 'ignore') + + info = json.loads(output) + + if not info: + # If ffprobe didn't give any information, just return it + # (for example, because the file doesn't exist) + return info + + extra_info = get_extra_info(stderr) + + audio_streams = [x for x in info['streams'] if x['codec_type'] == 'audio'] + if len(audio_streams) == 0: + return info + + # We just operate on the first audio stream in case there are more + stream = audio_streams[0] + + def set_property(stream, prop, value): + if prop not in stream or stream[prop] == 0: + stream[prop] = value + + for token in extra_info[stream['index']]: + m = re.match('([su]([0-9]{1,2})p?) \(([0-9]{1,2}) bit\)$', token) + m2 = re.match('([su]([0-9]{1,2})p?)( \(default\))?$', token) + if m: + set_property(stream, 'sample_fmt', m.group(1)) + set_property(stream, 'bits_per_sample', int(m.group(2))) + set_property(stream, 'bits_per_raw_sample', int(m.group(3))) + elif m2: + set_property(stream, 'sample_fmt', m2.group(1)) + set_property(stream, 'bits_per_sample', int(m2.group(2))) + set_property(stream, 'bits_per_raw_sample', int(m2.group(2))) + elif re.match('(flt)p?( \(default\))?$', token): + set_property(stream, 'sample_fmt', token) + set_property(stream, 'bits_per_sample', 32) + set_property(stream, 'bits_per_raw_sample', 32) + elif re.match('(dbl)p?( \(default\))?$', token): + set_property(stream, 'sample_fmt', token) + set_property(stream, 'bits_per_sample', 64) + set_property(stream, 'bits_per_raw_sample', 64) + return info + + +def mediainfo(filepath): + """Return dictionary with media info(codec, duration, size, bitrate...) 
from filepath + """ + + prober = get_prober_name() + command_args = [ + "-v", "quiet", + "-show_format", + "-show_streams", + filepath + ] + + command = [prober, '-of', 'old'] + command_args + res = Popen(command, stdout=PIPE) + output = res.communicate()[0].decode("utf-8") + + if res.returncode != 0: + command = [prober] + command_args + output = Popen(command, stdout=PIPE).communicate()[0].decode("utf-8") + + rgx = re.compile(r"(?:(?P.*?):)?(?P.*?)\=(?P.*?)$") + info = {} + + if sys.platform == 'win32': + output = output.replace("\r", "") + + for line in output.split("\n"): + # print(line) + mobj = rgx.match(line) + + if mobj: + # print(mobj.groups()) + inner_dict, key, value = mobj.groups() + + if inner_dict: + try: + info[inner_dict] + except KeyError: + info[inner_dict] = {} + info[inner_dict][key] = value + else: + info[key] = value + + return info + + +def cache_codecs(function): + cache = {} + + @wraps(function) + def wrapper(): + try: + return cache[0] + except: + cache[0] = function() + return cache[0] + + return wrapper + + +@cache_codecs +def get_supported_codecs(): + encoder = get_encoder_name() + command = [encoder, "-codecs"] + res = Popen(command, stdout=PIPE, stderr=PIPE) + output = res.communicate()[0].decode("utf-8") + if res.returncode != 0: + return [] + + if sys.platform == 'win32': + output = output.replace("\r", "") + + + rgx = re.compile(r"^([D.][E.][AVS.][I.][L.][S.]) (\w*) +(.*)") + decoders = set() + encoders = set() + for line in output.split('\n'): + match = rgx.match(line.strip()) + if not match: + continue + flags, codec, name = match.groups() + + if flags[0] == 'D': + decoders.add(codec) + + if flags[1] == 'E': + encoders.add(codec) + + return (decoders, encoders) + + +def get_supported_decoders(): + return get_supported_codecs()[0] + + +def get_supported_encoders(): + return get_supported_codecs()[1] + +def stereo_to_ms(audio_segment): + ''' + Left-Right -> Mid-Side + ''' + channel = audio_segment.split_to_mono() + channel = [channel[0].overlay(channel[1]), channel[0].overlay(channel[1].invert_phase())] + return AudioSegment.from_mono_audiosegments(channel[0], channel[1]) + +def ms_to_stereo(audio_segment): + ''' + Mid-Side -> Left-Right + ''' + channel = audio_segment.split_to_mono() + channel = [channel[0].overlay(channel[1]) - 3, channel[0].overlay(channel[1].invert_phase()) - 3] + return AudioSegment.from_mono_audiosegments(channel[0], channel[1]) + diff --git a/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/INSTALLER b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/LICENSE b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/LICENSE new file mode 100644 index 0000000..3a97119 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014, Saurabh Kumar (python-dotenv), 2013, Ted Tieken (django-dotenv-rw), 2013, Jacob Kaplan-Moss (django-dotenv) + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ +- Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +- Neither the name of django-dotenv nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/METADATA b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/METADATA new file mode 100644 index 0000000..b9af7fe --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/METADATA @@ -0,0 +1,692 @@ +Metadata-Version: 2.1 +Name: python-dotenv +Version: 1.0.1 +Summary: Read key-value pairs from a .env file and set them as environment variables +Home-page: https://github.com/theskumar/python-dotenv +Author: Saurabh Kumar +Author-email: me+github@saurabh-kumar.com +License: BSD-3-Clause +Keywords: environment variables,deployments,settings,env,dotenv,configurations,python +Classifier: Development Status :: 5 - Production/Stable +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Topic :: System :: Systems Administration +Classifier: Topic :: Utilities +Classifier: Environment :: Web Environment +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE +Provides-Extra: cli +Requires-Dist: click >=5.0 ; extra == 'cli' + +# python-dotenv + +[![Build Status][build_status_badge]][build_status_link] +[![PyPI version][pypi_badge]][pypi_link] + +Python-dotenv reads key-value pairs from a `.env` file and can set them as environment +variables. It helps in the development of applications following the +[12-factor](https://12factor.net/) principles. 
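For example (a minimal sketch; the `.env` contents and the `API_KEY` name below are made up for illustration and are not part of the package):

```python
# .env (hypothetical):
#   API_KEY=abc123
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env and sets the variables it defines, without overriding existing ones

api_key = os.getenv("API_KEY")  # -> "abc123", as if it had been exported in the shell
```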
+ +- [Getting Started](#getting-started) +- [Other Use Cases](#other-use-cases) + * [Load configuration without altering the environment](#load-configuration-without-altering-the-environment) + * [Parse configuration as a stream](#parse-configuration-as-a-stream) + * [Load .env files in IPython](#load-env-files-in-ipython) +- [Command-line Interface](#command-line-interface) +- [File format](#file-format) + * [Multiline values](#multiline-values) + * [Variable expansion](#variable-expansion) +- [Related Projects](#related-projects) +- [Acknowledgements](#acknowledgements) + +## Getting Started + +```shell +pip install python-dotenv +``` + +If your application takes its configuration from environment variables, like a 12-factor +application, launching it in development is not very practical because you have to set +those environment variables yourself. + +To help you with that, you can add Python-dotenv to your application to make it load the +configuration from a `.env` file when it is present (e.g. in development) while remaining +configurable via the environment: + +```python +from dotenv import load_dotenv + +load_dotenv() # take environment variables from .env. + +# Code of your application, which uses environment variables (e.g. from `os.environ` or +# `os.getenv`) as if they came from the actual environment. +``` + +By default, `load_dotenv` doesn't override existing environment variables. + +To configure the development environment, add a `.env` in the root directory of your +project: + +``` +. +├── .env +└── foo.py +``` + +The syntax of `.env` files supported by python-dotenv is similar to that of Bash: + +```bash +# Development settings +DOMAIN=example.org +ADMIN_EMAIL=admin@${DOMAIN} +ROOT_URL=${DOMAIN}/app +``` + +If you use variables in values, ensure they are surrounded with `{` and `}`, like +`${DOMAIN}`, as bare variables such as `$DOMAIN` are not expanded. + +You will probably want to add `.env` to your `.gitignore`, especially if it contains +secrets like a password. + +See the section "File format" below for more information about what you can write in a +`.env` file. + +## Other Use Cases + +### Load configuration without altering the environment + +The function `dotenv_values` works more or less the same way as `load_dotenv`, except it +doesn't touch the environment, it just returns a `dict` with the values parsed from the +`.env` file. + +```python +from dotenv import dotenv_values + +config = dotenv_values(".env") # config = {"USER": "foo", "EMAIL": "foo@example.org"} +``` + +This notably enables advanced configuration management: + +```python +import os +from dotenv import dotenv_values + +config = { + **dotenv_values(".env.shared"), # load shared development variables + **dotenv_values(".env.secret"), # load sensitive variables + **os.environ, # override loaded values with environment variables +} +``` + +### Parse configuration as a stream + +`load_dotenv` and `dotenv_values` accept [streams][python_streams] via their `stream` +argument. It is thus possible to load the variables from sources other than the +filesystem (e.g. the network). + +```python +from io import StringIO + +from dotenv import load_dotenv + +config = StringIO("USER=foo\nEMAIL=foo@example.org") +load_dotenv(stream=config) +``` + +### Load .env files in IPython + +You can use dotenv in IPython. 
By default, it will use `find_dotenv` to search for a +`.env` file: + +```python +%load_ext dotenv +%dotenv +``` + +You can also specify a path: + +```python +%dotenv relative/or/absolute/path/to/.env +``` + +Optional flags: + +- `-o` to override existing variables. +- `-v` for increased verbosity. + +## Command-line Interface + +A command-line interface `dotenv` is also included, which helps you manipulate the `.env` file +without manually opening it. + +```shell +$ pip install "python-dotenv[cli]" +$ dotenv set USER foo +$ dotenv set EMAIL foo@example.org +$ dotenv list +USER=foo +EMAIL=foo@example.org +$ dotenv list --format=json +{ + "USER": "foo", + "EMAIL": "foo@example.org" +} +$ dotenv run -- python foo.py +``` + +Run `dotenv --help` for more information about the options and subcommands. + +## File format + +The format is not formally specified and is still evolving. That being said, +`.env` files should mostly look like Bash files. + +Keys can be unquoted or single-quoted. Values can be unquoted, single- or double-quoted. +Spaces before and after keys, equal signs, and values are ignored. Values can be followed +by a comment. Lines can start with the `export` directive, which does not affect their +interpretation. + +Allowed escape sequences: + +- in single-quoted values: `\\`, `\'` +- in double-quoted values: `\\`, `\'`, `\"`, `\a`, `\b`, `\f`, `\n`, `\r`, `\t`, `\v` + +### Multiline values + +It is possible for single- or double-quoted values to span multiple lines. The following +examples are equivalent: + +```bash +FOO="first line +second line" +``` + +```bash +FOO="first line\nsecond line" +``` + +### Variable without a value + +A variable can have no value: + +```bash +FOO +``` + +It results in `dotenv_values` associating that variable name with the value `None` (e.g. +`{"FOO": None}`). `load_dotenv`, on the other hand, simply ignores such variables. + +This shouldn't be confused with `FOO=`, in which case the variable is associated with the +empty string. + +### Variable expansion + +Python-dotenv can interpolate variables using POSIX variable expansion. + +With `load_dotenv(override=True)` or `dotenv_values()`, the value of a variable is the +first of the values defined in the following list: + +- Value of that variable in the `.env` file. +- Value of that variable in the environment. +- Default value, if provided. +- Empty string. + +With `load_dotenv(override=False)`, the value of a variable is the first of the values +defined in the following list: + +- Value of that variable in the environment. +- Value of that variable in the `.env` file. +- Default value, if provided. +- Empty string. + +## Related Projects + +- [Honcho](https://github.com/nickstenning/honcho) - For managing + Procfile-based applications.
+- [django-dotenv](https://github.com/jpadilla/django-dotenv) +- [django-environ](https://github.com/joke2k/django-environ) +- [django-environ-2](https://github.com/sergeyklay/django-environ-2) +- [django-configuration](https://github.com/jezdez/django-configurations) +- [dump-env](https://github.com/sobolevn/dump-env) +- [environs](https://github.com/sloria/environs) +- [dynaconf](https://github.com/rochacbruno/dynaconf) +- [parse_it](https://github.com/naorlivne/parse_it) +- [python-decouple](https://github.com/HBNetwork/python-decouple) + +## Acknowledgements + +This project is currently maintained by [Saurabh Kumar](https://saurabh-kumar.com) and +[Bertrand Bonnefoy-Claudet](https://github.com/bbc2) and would not have been possible +without the support of these [awesome +people](https://github.com/theskumar/python-dotenv/graphs/contributors). + +[build_status_badge]: https://github.com/theskumar/python-dotenv/actions/workflows/test.yml/badge.svg +[build_status_link]: https://github.com/theskumar/python-dotenv/actions/workflows/test.yml +[pypi_badge]: https://badge.fury.io/py/python-dotenv.svg +[pypi_link]: https://badge.fury.io/py/python-dotenv +[python_streams]: https://docs.python.org/3/library/io.html + +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this +project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.0.1] - 2024-01-23 + +**Fixed** + +* Gracefully handle code which has been imported from a zipfile ([#456] by [@samwyma]) +* Allow modules using load_dotenv to be reloaded when launched in a separate thread ([#497] by [@freddyaboulton]) +* Fix file not closed after deletion, handle error in the rewrite function ([#469] by [@Qwerty-133]) + +**Misc** +* Use pathlib.Path in tests ([#466] by [@eumiro]) +* Fix year in release date in changelog.md ([#454] by [@jankislinger]) +* Use https in README links ([#474] by [@Nicals]) + +## [1.0.0] - 2023-02-24 + +**Fixed** + +* Drop support for python 3.7, add python 3.12-dev (#449 by [@theskumar]) +* Handle situations where the cwd does not exist. (#446 by [@jctanner]) + +## [0.21.1] - 2023-01-21 + +**Added** + +* Use Python 3.11 non-beta in CI (#438 by [@bbc2]) +* Modernize variables code (#434 by [@Nougat-Waffle]) +* Modernize main.py and parser.py code (#435 by [@Nougat-Waffle]) +* Improve conciseness of cli.py and __init__.py (#439 by [@Nougat-Waffle]) +* Improve error message for `get` and `list` commands when env file can't be opened (#441 by [@bbc2]) +* Updated License to align with BSD OSI template (#433 by [@lsmith77]) + + +**Fixed** + +* Fix Out-of-scope error when "dest" variable is undefined (#413 by [@theGOTOguy]) +* Fix IPython test warning about deprecated `magic` (#440 by [@bbc2]) +* Fix type hint for dotenv_path var, add StrPath alias (#432 by [@eaf]) + +## [0.21.0] - 2022-09-03 + +**Added** + +* CLI: add support for invocations via 'python -m'. (#395 by [@theskumar]) +* `load_dotenv` function now returns `False`. (#388 by [@larsks]) +* CLI: add --format= option to list command. (#407 by [@sammck]) + +**Fixed** + +* Drop Python 3.5 and 3.6 and upgrade GA (#393 by [@eggplants]) +* Use `open` instead of `io.open`. 
(#389 by [@rabinadk1]) +* Improve documentation for variables without a value (#390 by [@bbc2]) +* Add `parse_it` to Related Projects (#410 by [@naorlivne]) +* Update README.md (#415 by [@harveer07]) +* Improve documentation with direct use of MkDocs (#398 by [@bbc2]) + +## [0.20.0] - 2022-03-24 + +**Added** + +- Add `encoding` (`Optional[str]`) parameter to `get_key`, `set_key` and `unset_key`. + (#379 by [@bbc2]) + +**Fixed** + +- Use dict to specify the `entry_points` parameter of `setuptools.setup` (#376 by + [@mgorny]). +- Don't build universal wheels (#387 by [@bbc2]). + +## [0.19.2] - 2021-11-11 + +**Fixed** + +- In `set_key`, add missing newline character before new entry if necessary. (#361 by + [@bbc2]) + +## [0.19.1] - 2021-08-09 + +**Added** + +- Add support for Python 3.10. (#359 by [@theskumar]) + +## [0.19.0] - 2021-07-24 + +**Changed** + +- Require Python 3.5 or a later version. Python 2 and 3.4 are no longer supported. (#341 + by [@bbc2]). + +**Added** + +- The `dotenv_path` argument of `set_key` and `unset_key` now has a type of `Union[str, + os.PathLike]` instead of just `os.PathLike` (#347 by [@bbc2]). +- The `stream` argument of `load_dotenv` and `dotenv_values` can now be a text stream + (`IO[str]`), which includes values like `io.StringIO("foo")` and `open("file.env", + "r")` (#348 by [@bbc2]). + +## [0.18.0] - 2021-06-20 + +**Changed** + +- Raise `ValueError` if `quote_mode` isn't one of `always`, `auto` or `never` in + `set_key` (#330 by [@bbc2]). +- When writing a value to a .env file with `set_key` or `dotenv set <key> <value>` (#330 + by [@bbc2]): + - Use single quotes instead of double quotes. + - Don't strip surrounding quotes. + - In `auto` mode, don't add quotes if the value is only made of alphanumeric characters + (as determined by `string.isalnum`). + +## [0.17.1] - 2021-04-29 + +**Fixed** + +- Fixed tests for build environments relying on `PYTHONPATH` (#318 by [@befeleme]). + +## [0.17.0] - 2021-04-02 + +**Changed** + +- Make `dotenv get <key>` only show the value, not `key=value` (#313 by [@bbc2]). + +**Added** + +- Add `--override`/`--no-override` option to `dotenv run` (#312 by [@zueve] and [@bbc2]). + +## [0.16.0] - 2021-03-27 + +**Changed** + +- The default value of the `encoding` parameter for `load_dotenv` and `dotenv_values` is + now `"utf-8"` instead of `None` (#306 by [@bbc2]). +- Fix resolution order in variable expansion with `override=False` (#287 by [@bbc2]). + +## [0.15.0] - 2020-10-28 + +**Added** + +- Add `--export` option to `set` to make it prepend the binding with `export` (#270 by + [@jadutter]). + +**Changed** + +- Make `set` command create the `.env` file in the current directory if no `.env` file was + found (#270 by [@jadutter]). + +**Fixed** + +- Fix potentially empty expanded value for duplicate key (#260 by [@bbc2]). +- Fix import error on Python 3.5.0 and 3.5.1 (#267 by [@gongqingkui]). +- Fix parsing of unquoted values containing several adjacent space or tab characters + (#277 by [@bbc2], review by [@x-yuri]). + +## [0.14.0] - 2020-07-03 + +**Changed** + +- Privilege definition in file over the environment in variable expansion (#256 by + [@elbehery95]). + +**Fixed** + +- Improve error message for when file isn't found (#245 by [@snobu]). +- Use HTTPS URL in package meta data (#251 by [@ekohl]). + +## [0.13.0] - 2020-04-16 + +**Added** + +- Add support for a Bash-like default value in variable expansion (#248 by [@bbc2]).
+ +## [0.12.0] - 2020-02-28 + +**Changed** + +- Use current working directory to find `.env` when bundled by PyInstaller (#213 by + [@gergelyk]). + +**Fixed** + +- Fix escaping of quoted values written by `set_key` (#236 by [@bbc2]). +- Fix `dotenv run` crashing on environment variables without values (#237 by [@yannham]). +- Remove warning when last line is empty (#238 by [@bbc2]). + +## [0.11.0] - 2020-02-07 + +**Added** + +- Add `interpolate` argument to `load_dotenv` and `dotenv_values` to disable interpolation + (#232 by [@ulyssessouza]). + +**Changed** + +- Use logging instead of warnings (#231 by [@bbc2]). + +**Fixed** + +- Fix installation in non-UTF-8 environments (#225 by [@altendky]). +- Fix PyPI classifiers (#228 by [@bbc2]). + +## [0.10.5] - 2020-01-19 + +**Fixed** + +- Fix handling of malformed lines and lines without a value (#222 by [@bbc2]): + - Don't print warning when key has no value. + - Reject more malformed lines (e.g. "A: B", "a='b',c"). +- Fix handling of lines with just a comment (#224 by [@bbc2]). + +## [0.10.4] - 2020-01-17 + +**Added** + +- Make typing optional (#179 by [@techalchemy]). +- Print a warning on malformed line (#211 by [@bbc2]). +- Support keys without a value (#220 by [@ulyssessouza]). + +## 0.10.3 + +- Improve interactive mode detection ([@andrewsmith])([#183]). +- Refactor parser to fix parsing inconsistencies ([@bbc2])([#170]). + - Interpret escapes as control characters only in double-quoted strings. + - Interpret `#` as start of comment only if preceded by whitespace. + +## 0.10.2 + +- Add type hints and expose them to users ([@qnighy])([#172]) +- `load_dotenv` and `dotenv_values` now accept an `encoding` parameter, defaults to `None` + ([@theskumar])([@earlbread])([#161]) +- Fix `str`/`unicode` inconsistency in Python 2: values are always `str` now. ([@bbc2])([#121]) +- Fix Unicode error in Python 2, introduced in 0.10.0. ([@bbc2])([#176]) + +## 0.10.1 +- Fix parsing of variable without a value ([@asyncee])([@bbc2])([#158]) + +## 0.10.0 + +- Add support for UTF-8 in unquoted values ([@bbc2])([#148]) +- Add support for trailing comments ([@bbc2])([#148]) +- Add backslashes support in values ([@bbc2])([#148]) +- Add support for newlines in values ([@bbc2])([#148]) +- Force environment variables to str with Python2 on Windows ([@greyli]) +- Drop Python 3.3 support ([@greyli]) +- Fix stderr/-out/-in redirection ([@venthur]) + + +## 0.9.0 + +- Add `--version` parameter to cli ([@venthur]) +- Enable loading from current directory ([@cjauvin]) +- Add 'dotenv run' command for calling arbitrary shell script with .env ([@venthur]) + +## 0.8.1 + +- Add tests for docs ([@Flimm]) +- Make 'cli' support optional. Use `pip install python-dotenv[cli]`. ([@theskumar]) + +## 0.8.0 + +- `set_key` and `unset_key` now modify only the affected lines instead of + parsing and re-writing the whole file, so comments and the rest of the + file are kept intact. +- Add support for `export` prefix in the line. +- Internal refactoring ([@theskumar]) +- Allow `load_dotenv` and `dotenv_values` to work with `StringIO()` ([@alanjds])([@theskumar])([#78]) + +## 0.7.1 + +- Remove hard dependency on iPython ([@theskumar]) + +## 0.7.0 + +- Add support to override system environment variable via .env.
+ ([@milonimrod](https://github.com/milonimrod)) + ([\#63](https://github.com/theskumar/python-dotenv/issues/63)) +- Disable ".env not found" warning by default + ([@maxkoryukov](https://github.com/maxkoryukov)) + ([\#57](https://github.com/theskumar/python-dotenv/issues/57)) + +## 0.6.5 + +- Add support for special characters `\`. + ([@pjona](https://github.com/pjona)) + ([\#60](https://github.com/theskumar/python-dotenv/issues/60)) + +## 0.6.4 + +- Fix issue with single quotes ([@Flimm]) + ([\#52](https://github.com/theskumar/python-dotenv/issues/52)) + +## 0.6.3 + +- Handle unicode exception in setup.py + ([\#46](https://github.com/theskumar/python-dotenv/issues/46)) + +## 0.6.2 + +- Fix dotenv list command ([@ticosax](https://github.com/ticosax)) +- Add iPython Support + ([@tillahoffmann](https://github.com/tillahoffmann)) + +## 0.6.0 + +- Drop support for Python 2.6 +- Handle escaped characters and newlines in quoted values. (Thanks + [@iameugenejo](https://github.com/iameugenejo)) +- Remove any spaces around unquoted key/value. (Thanks + [@paulochf](https://github.com/paulochf)) +- Added POSIX variable expansion. (Thanks + [@hugochinchilla](https://github.com/hugochinchilla)) + +## 0.5.1 + +- Fix `find_dotenv` - it now starts the search from the file where this + function is called. + +## 0.5.0 + +- Add `find_dotenv` method that will try to find a `.env` file. + (Thanks [@isms](https://github.com/isms)) + +## 0.4.0 + +- cli: Added `-q/--quote` option to control the behaviour of quotes + around values in `.env`. (Thanks + [@hugochinchilla](https://github.com/hugochinchilla)). +- Improved test coverage. + +[#78]: https://github.com/theskumar/python-dotenv/issues/78 +[#121]: https://github.com/theskumar/python-dotenv/issues/121 +[#148]: https://github.com/theskumar/python-dotenv/issues/148 +[#158]: https://github.com/theskumar/python-dotenv/issues/158 +[#170]: https://github.com/theskumar/python-dotenv/issues/170 +[#172]: https://github.com/theskumar/python-dotenv/issues/172 +[#176]: https://github.com/theskumar/python-dotenv/issues/176 +[#183]: https://github.com/theskumar/python-dotenv/issues/183 +[#359]: https://github.com/theskumar/python-dotenv/issues/359 +[#469]: https://github.com/theskumar/python-dotenv/issues/469 +[#456]: https://github.com/theskumar/python-dotenv/issues/456 +[#466]: https://github.com/theskumar/python-dotenv/issues/466 +[#454]: https://github.com/theskumar/python-dotenv/issues/454 +[#474]: https://github.com/theskumar/python-dotenv/issues/474 + +[@alanjds]: https://github.com/alanjds +[@altendky]: https://github.com/altendky +[@andrewsmith]: https://github.com/andrewsmith +[@asyncee]: https://github.com/asyncee +[@bbc2]: https://github.com/bbc2 +[@befeleme]: https://github.com/befeleme +[@cjauvin]: https://github.com/cjauvin +[@eaf]: https://github.com/eaf +[@earlbread]: https://github.com/earlbread +[@eggplants]: https://github.com/eggplants +[@ekohl]: https://github.com/ekohl +[@elbehery95]: https://github.com/elbehery95 +[@eumiro]: https://github.com/eumiro +[@Flimm]: https://github.com/Flimm +[@freddyaboulton]: https://github.com/freddyaboulton +[@gergelyk]: https://github.com/gergelyk +[@gongqingkui]: https://github.com/gongqingkui +[@greyli]: https://github.com/greyli +[@harveer07]: https://github.com/harveer07 +[@jadutter]: https://github.com/jadutter +[@jankislinger]: https://github.com/jankislinger +[@jctanner]: https://github.com/jctanner +[@larsks]: https://github.com/larsks +[@lsmith77]: https://github.com/lsmith77 +[@mgorny]:
https://github.com/mgorny +[@naorlivne]: https://github.com/naorlivne +[@Nicals]: https://github.com/Nicals +[@Nougat-Waffle]: https://github.com/Nougat-Waffle +[@qnighy]: https://github.com/qnighy +[@Qwerty-133]: https://github.com/Qwerty-133 +[@rabinadk1]: https://github.com/rabinadk1 +[@sammck]: https://github.com/sammck +[@samwyma]: https://github.com/samwyma +[@snobu]: https://github.com/snobu +[@techalchemy]: https://github.com/techalchemy +[@theGOTOguy]: https://github.com/theGOTOguy +[@theskumar]: https://github.com/theskumar +[@ulyssessouza]: https://github.com/ulyssessouza +[@venthur]: https://github.com/venthur +[@x-yuri]: https://github.com/x-yuri +[@yannham]: https://github.com/yannham +[@zueve]: https://github.com/zueve + + +[Unreleased]: https://github.com/theskumar/python-dotenv/compare/v1.0.1...HEAD +[1.0.1]: https://github.com/theskumar/python-dotenv/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/theskumar/python-dotenv/compare/v0.21.0...v1.0.0 +[0.21.1]: https://github.com/theskumar/python-dotenv/compare/v0.21.0...v0.21.1 +[0.21.0]: https://github.com/theskumar/python-dotenv/compare/v0.20.0...v0.21.0 +[0.20.0]: https://github.com/theskumar/python-dotenv/compare/v0.19.2...v0.20.0 +[0.19.2]: https://github.com/theskumar/python-dotenv/compare/v0.19.1...v0.19.2 +[0.19.1]: https://github.com/theskumar/python-dotenv/compare/v0.19.0...v0.19.1 +[0.19.0]: https://github.com/theskumar/python-dotenv/compare/v0.18.0...v0.19.0 +[0.18.0]: https://github.com/theskumar/python-dotenv/compare/v0.17.1...v0.18.0 +[0.17.1]: https://github.com/theskumar/python-dotenv/compare/v0.17.0...v0.17.1 +[0.17.0]: https://github.com/theskumar/python-dotenv/compare/v0.16.0...v0.17.0 +[0.16.0]: https://github.com/theskumar/python-dotenv/compare/v0.15.0...v0.16.0 +[0.15.0]: https://github.com/theskumar/python-dotenv/compare/v0.14.0...v0.15.0 +[0.14.0]: https://github.com/theskumar/python-dotenv/compare/v0.13.0...v0.14.0 +[0.13.0]: https://github.com/theskumar/python-dotenv/compare/v0.12.0...v0.13.0 +[0.12.0]: https://github.com/theskumar/python-dotenv/compare/v0.11.0...v0.12.0 +[0.11.0]: https://github.com/theskumar/python-dotenv/compare/v0.10.5...v0.11.0 +[0.10.5]: https://github.com/theskumar/python-dotenv/compare/v0.10.4...v0.10.5 +[0.10.4]: https://github.com/theskumar/python-dotenv/compare/v0.10.3...v0.10.4 diff --git a/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/RECORD b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/RECORD new file mode 100644 index 0000000..e83b200 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/RECORD @@ -0,0 +1,25 @@ +../../../bin/dotenv,sha256=DFW481FSep-TMXMDxlXWNetPESe1iCVnZtQRR1Jyn0o,239 +dotenv/__init__.py,sha256=WBU5SfSiKAhS3hzu17ykNuuwbuwyDCX91Szv4vUeOuM,1292 +dotenv/__main__.py,sha256=N0RhLG7nHIqtlJHwwepIo-zbJPNx9sewCCRGY528h_4,129 +dotenv/__pycache__/__init__.cpython-312.pyc,, +dotenv/__pycache__/__main__.cpython-312.pyc,, +dotenv/__pycache__/cli.cpython-312.pyc,, +dotenv/__pycache__/ipython.cpython-312.pyc,, +dotenv/__pycache__/main.cpython-312.pyc,, +dotenv/__pycache__/parser.cpython-312.pyc,, +dotenv/__pycache__/variables.cpython-312.pyc,, +dotenv/__pycache__/version.cpython-312.pyc,, +dotenv/cli.py,sha256=_ttQuR9Yl4k1PT53ByISkDjJ3kO_N_LzIDZzZ95uXEk,5809 +dotenv/ipython.py,sha256=avI6aez_RxnBptYgchIquF2TSgKI-GOhY3ppiu3VuWE,1303 +dotenv/main.py,sha256=GV7Ki6JYPDa-xy2ZXHKqER-bRvKa7qqh0G0OwffYJr8,12098 +dotenv/parser.py,sha256=QgU5HwMwM2wMqt0vz6dHTJ4nzPmwqRqvi4MSyeVifgU,5186
+dotenv/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +dotenv/variables.py,sha256=CD0qXOvvpB3q5RpBQMD9qX6vHX7SyW-SuiwGMFSlt08,2348 +dotenv/version.py,sha256=d4QHYmS_30j0hPN8NmNPnQ_Z0TphDRbu4MtQj9cT9e8,22 +python_dotenv-1.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +python_dotenv-1.0.1.dist-info/LICENSE,sha256=gGGbcEnwjIFoOtDgHwjyV6hAZS3XHugxRtNmWMfSwrk,1556 +python_dotenv-1.0.1.dist-info/METADATA,sha256=fCkcTEUG3zknbuN1BK8e0PPCIgvPBLk-LneK0mRDM_s,23170 +python_dotenv-1.0.1.dist-info/RECORD,, +python_dotenv-1.0.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92 +python_dotenv-1.0.1.dist-info/entry_points.txt,sha256=yRl1rCbswb1nQTQ_gZRlCw5QfabztUGnfGWLhlXFNdI,47 +python_dotenv-1.0.1.dist-info/top_level.txt,sha256=eyqUH4SHJNr6ahOYlxIunTr4XinE8Z5ajWLdrK3r0D8,7 diff --git a/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/WHEEL b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/WHEEL new file mode 100644 index 0000000..98c0d20 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/entry_points.txt b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/entry_points.txt new file mode 100644 index 0000000..0a86823 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +dotenv = dotenv.__main__:cli diff --git a/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/top_level.txt b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/top_level.txt new file mode 100644 index 0000000..fe7c01a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/python_dotenv-1.0.1.dist-info/top_level.txt @@ -0,0 +1 @@ +dotenv diff --git a/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/INSTALLER b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/LICENSE-APACHE b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/LICENSE-APACHE new file mode 100644 index 0000000..5f66d4e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/LICENSE-APACHE @@ -0,0 +1,203 @@ +Copyright (C) 2016-present the uvloop authors and contributors. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2015-present MagicStack Inc. http://magic.io + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/LICENSE-MIT b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/LICENSE-MIT new file mode 100644 index 0000000..40fd023 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License + +Copyright (C) 2016-present the uvloop authors and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/METADATA b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/METADATA new file mode 100644 index 0000000..49012e4 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/METADATA @@ -0,0 +1,175 @@ +Metadata-Version: 2.1 +Name: uvloop +Version: 0.21.0 +Summary: Fast implementation of asyncio event loop on top of libuv +Author-email: Yury Selivanov +License: MIT License +Project-URL: github, https://github.com/MagicStack/uvloop +Keywords: asyncio,networking +Classifier: Development Status :: 5 - Production/Stable +Classifier: Framework :: AsyncIO +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: System :: Networking +Requires-Python: >=3.8.0 +Description-Content-Type: text/x-rst +License-File: LICENSE-APACHE +License-File: LICENSE-MIT +Provides-Extra: dev +Requires-Dist: setuptools>=60; extra == "dev" +Requires-Dist: Cython~=3.0; extra == "dev" +Provides-Extra: docs +Requires-Dist: Sphinx~=4.1.2; extra == "docs" +Requires-Dist: sphinxcontrib-asyncio~=0.3.0; extra == "docs" +Requires-Dist: sphinx-rtd-theme~=0.5.2; extra == "docs" +Provides-Extra: test +Requires-Dist: aiohttp>=3.10.5; extra == "test" +Requires-Dist: flake8~=5.0; extra == "test" +Requires-Dist: psutil; extra == "test" +Requires-Dist: pycodestyle~=2.9.0; extra == "test" +Requires-Dist: pyOpenSSL~=23.0.0; extra == "test" +Requires-Dist: mypy>=0.800; 
extra == "test" + +.. image:: https://img.shields.io/github/actions/workflow/status/MagicStack/uvloop/tests.yml?branch=master + :target: https://github.com/MagicStack/uvloop/actions/workflows/tests.yml?query=branch%3Amaster + +.. image:: https://img.shields.io/pypi/v/uvloop.svg + :target: https://pypi.python.org/pypi/uvloop + +.. image:: https://pepy.tech/badge/uvloop + :target: https://pepy.tech/project/uvloop + :alt: PyPI - Downloads + + +uvloop is a fast, drop-in replacement of the built-in asyncio +event loop. uvloop is implemented in Cython and uses libuv +under the hood. + +The project documentation can be found +`here `_. Please also check out the +`wiki `_. + + +Performance +----------- + +uvloop makes asyncio 2-4x faster. + +.. image:: https://raw.githubusercontent.com/MagicStack/uvloop/master/performance.png + :target: http://magic.io/blog/uvloop-blazing-fast-python-networking/ + +The above chart shows the performance of an echo server with different +message sizes. The *sockets* benchmark uses ``loop.sock_recv()`` and +``loop.sock_sendall()`` methods; the *streams* benchmark uses asyncio +high-level streams, created by the ``asyncio.start_server()`` function; +and the *protocol* benchmark uses ``loop.create_server()`` with a simple +echo protocol. Read more about uvloop in a +`blog post `_ +about it. + + +Installation +------------ + +uvloop requires Python 3.8 or greater and is available on PyPI. +Use pip to install it:: + + $ pip install uvloop + +Note that it is highly recommended to **upgrade pip before** installing +uvloop with:: + + $ pip install -U pip + + +Using uvloop +------------ + +As of uvloop 0.18, the preferred way of using it is via the +``uvloop.run()`` helper function: + + +.. code:: python + + import uvloop + + async def main(): + # Main entry-point. + ... + + uvloop.run(main()) + +``uvloop.run()`` works by simply configuring ``asyncio.run()`` +to use uvloop, passing all of the arguments to it, such as ``debug``, +e.g. ``uvloop.run(main(), debug=True)``. + +With Python 3.11 and earlier the following alternative +snippet can be used: + +.. code:: python + + import asyncio + import sys + + import uvloop + + async def main(): + # Main entry-point. + ... + + if sys.version_info >= (3, 11): + with asyncio.Runner(loop_factory=uvloop.new_event_loop) as runner: + runner.run(main()) + else: + uvloop.install() + asyncio.run(main()) + + +Building From Source +-------------------- + +To build uvloop, you'll need Python 3.8 or greater: + +1. Clone the repository: + + .. code:: + + $ git clone --recursive git@github.com:MagicStack/uvloop.git + $ cd uvloop + +2. Create a virtual environment and activate it: + + .. code:: + + $ python3 -m venv uvloop-dev + $ source uvloop-dev/bin/activate + +3. Install development dependencies: + + .. code:: + + $ pip install -e .[dev] + +4. Build and run tests: + + .. code:: + + $ make + $ make test + + +License +------- + +uvloop is dual-licensed under MIT and Apache 2.0 licenses. 
diff --git a/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/RECORD b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/RECORD new file mode 100644 index 0000000..57f2d09 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/RECORD @@ -0,0 +1,69 @@ +uvloop-0.21.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +uvloop-0.21.0.dist-info/LICENSE-APACHE,sha256=N3AlKHeY-dzYGeH4JvpfxeLzglKGkasFKMXPjIwoLCc,11439 +uvloop-0.21.0.dist-info/LICENSE-MIT,sha256=bdTDmfJt4EPXeirX4x20y1vwjqg2iwpC1uFYY1zIq2I,1105 +uvloop-0.21.0.dist-info/METADATA,sha256=UnacVCAjauzcbHHE4UVtxI84a9-D1B5zy0Fi_T6NGu0,4899 +uvloop-0.21.0.dist-info/RECORD,, +uvloop-0.21.0.dist-info/WHEEL,sha256=JjfPN3tuNVktXeVx2CdYSsvuQYIjI_7f2R0S3yE0Gks,115 +uvloop-0.21.0.dist-info/top_level.txt,sha256=2cDaltyemYfQErB19s2jFmumeJRnbsZPJ7Lj9A78c6Y,7 +uvloop/__init__.py,sha256=CuY_C2LjdsJTwxAgU0tqRAU6Bb-XC0F5EUjJc70OZFc,5228 +uvloop/__pycache__/__init__.cpython-312.pyc,, +uvloop/__pycache__/_noop.cpython-312.pyc,, +uvloop/__pycache__/_testbase.cpython-312.pyc,, +uvloop/__pycache__/_version.cpython-312.pyc,, +uvloop/_noop.py,sha256=SDAJTiWhE7g3KyttbjPdliv-Uheuas-tKX4_y_nvO_Q,86 +uvloop/_testbase.py,sha256=sRZjHR-nMHv4UZ23AkSCtgEsvgo8uOqDchFNOFViiRg,15570 +uvloop/_version.py,sha256=pRhsSEFabYnSrcbRCuOkm0vrAr6wBs5E2NLUAzk-OqY,576 +uvloop/cbhandles.pxd,sha256=gW0spS84wbfuEHuYEbRSsHiKRmb5pfDHkYZvxhTC-Vo,752 +uvloop/cbhandles.pyx,sha256=PTQjEEN4yGloNP6lIHddNzDOFqowvGm_CvS9M6yHvc4,12298 +uvloop/dns.pyx,sha256=oHTr36ic6u9F-VFAAv0G92KY44O3-0x3ytcOAVvGmTs,14562 +uvloop/errors.pyx,sha256=2etYn89Th3tIsNMLl33Quc-1WkKKY7umPOVvilTzi9k,2774 +uvloop/handles/async_.pxd,sha256=xtsWSi0A67joJU4iFp5JWzQxwNj4LCq_KMDyDDMxdec,252 +uvloop/handles/async_.pyx,sha256=Hd_Bgi8I9uJZ20_2qUsHYYQtwq4LKtjTr3THQYKp-Sk,1516 +uvloop/handles/basetransport.pxd,sha256=SiDD77NPthTfjXVg12gJJGM1YYKZXw4AEK9tv22jJeE,1322 +uvloop/handles/basetransport.pyx,sha256=GtN3vdp6DDkh1g0RRPemj0r4x-Exskw-m16p_vY_E9g,9553 +uvloop/handles/check.pxd,sha256=IufFrzdMhLRc5zAjh7Lb0lAqw-UclrYVo-UgqIs6eJ0,276 +uvloop/handles/check.pyx,sha256=70d5oylnFnZjEJo_HBg5JYw2hE3PvkU3rhzALDEUOK8,1881 +uvloop/handles/fsevent.pxd,sha256=YfklQ9TeikRV2QRLNPAtkEwu_3vwrsOq9cMJxFV8VgI,325 +uvloop/handles/fsevent.pyx,sha256=RUV2-WhBo2OjXFn0N49l4th1DFZ0kdC-7YgsIZkUBoI,2823 +uvloop/handles/handle.pxd,sha256=QPjUCObkDwvjRAZFlolF1tNXFV9-jAf22V0KweiLdOA,1189 +uvloop/handles/handle.pyx,sha256=YOaN1fSPzo_IJA3IbG7E10pc-dbAN7y8DyGZoLgho-M,13248 +uvloop/handles/idle.pxd,sha256=L3Gr2tuzKHWEB2NnykwjbNyexNUlckBdGFKPufn5AZU,274 +uvloop/handles/idle.pyx,sha256=BXi_PQrgbPN2n3-QybHo0CLhW2m9N7benwSb4q7u87I,1859 +uvloop/handles/pipe.pxd,sha256=LzsEOwptkqNa52O1Iyqhxq2d4ppzmHr0x8cMwJIZZfk,933 +uvloop/handles/pipe.pyx,sha256=9xINAS1xZuPM87gS-QYVGwUn_4JhcqKwqJobjpHHGkM,7688 +uvloop/handles/poll.pxd,sha256=afAR6gAx52OnmPqaHa3y41xxtIYxam1w9XoNZRxNMwU,575 +uvloop/handles/poll.pyx,sha256=kjlhSrRyOHnH2tJJLmBtE0ePltUWTKphJ6ml8RP0Qhg,6511 +uvloop/handles/process.pxd,sha256=FKCuQWWzDL8r0N1phlwPJ_pGGY3TZsOl5rBQP4AlgYo,2314 +uvloop/handles/process.pyx,sha256=x89gE5JCApGshWqln-2qxYI_I262r5udmLCnBAyW--w,26919 +uvloop/handles/stream.pxd,sha256=1BASyhG8z9HDf4ZikWPqd-hldQgGSdHl3ta-nNEnChE,1535 +uvloop/handles/stream.pyx,sha256=bizhF7PRNmy3Zcd7anORwZRAsQx4tV31dhzqNf5_fAc,31856 +uvloop/handles/streamserver.pxd,sha256=hIDDhB2RK0lnMUscDWcGl2NRkclb6AYfche77YEdaes,786 +uvloop/handles/streamserver.pyx,sha256=quWwKo_rz4Jzq-YNLZQ7lmcBNLSzQBpf31nS64jhbrU,4632 
+uvloop/handles/tcp.pxd,sha256=xNYy-df1tK5ywK3V7a0wWno9tAA7JH-FiIQ5F0296ZM,892 +uvloop/handles/tcp.pyx,sha256=22isLLJ9__U7Bx2ZQwWP3Mozt0DZ66aOLREW7adKGLs,7291 +uvloop/handles/timer.pxd,sha256=VcLZBfzd9ixuxmJrE9O3YmyVO4LfMDwcG7UNpJbTu40,440 +uvloop/handles/timer.pyx,sha256=zT35AW9Wv9H_zWa6sw7GOi4SB7HavGUobFezTFfSq6E,2416 +uvloop/handles/udp.pxd,sha256=gQn9FH4rAiXDR_kZNqaYcNMGMzFL-T1V1G8JI6JOHU8,671 +uvloop/handles/udp.pyx,sha256=_doWmjAsh3vPES_CLQ7j309f71qK_6YIBGKtimpjAO8,12039 +uvloop/includes/__init__.py,sha256=-OUZ6zr6Opdw78PKsHYi1AuP74Ep7XByxyoRYOuRtgI,361 +uvloop/includes/__pycache__/__init__.cpython-312.pyc,, +uvloop/includes/consts.pxi,sha256=m6K9HIUl8G3D9iOIzK0C3_chXKwIfsiq88j3VOvUuU4,843 +uvloop/includes/debug.pxd,sha256=cCnlyp6HkhQgVF7lAQPA31wIa1n1pn6eUY_wARYh3uA,64 +uvloop/includes/flowcontrol.pxd,sha256=7PuZtEgp4TS1Y3iNqZZInkDKI5iCylERrcLqe2ls3EY,458 +uvloop/includes/python.pxd,sha256=SSB2FPEsEt_Aif66l-SQvFpJ3I7TrgbL4lsiu_Kyu9k,846 +uvloop/includes/stdlib.pxi,sha256=k49jKoHwvBhVho5W95yQrPMKskonEhQpqi95GZe6RHM,6361 +uvloop/includes/system.pxd,sha256=pbXOeZeXaDZ0b3CIFOgObE5C-cr6vhi6io-F8wLIaNQ,2186 +uvloop/includes/uv.pxd,sha256=wkayMxCaI9RyxTb1sqkP6DdU6l_w9ql18SYAoEYSNiA,16080 +uvloop/loop.cpython-312-darwin.so,sha256=VVeK7ZA0A6tBbiNNkvVBJc0NzXybBCNrbiplNGvLx0Q,3942032 +uvloop/loop.pxd,sha256=1C4lOQV6MTWmvAnL67W3CvEyBdnDNYLEtCMPTZD40s8,6224 +uvloop/loop.pyi,sha256=xLLboc-tuzlu68RcUhghA-jjSy-mMNixiVDNY6TZueU,10504 +uvloop/loop.pyx,sha256=C2jMCvqkhswEcq9rjg0lbieAIXeksLiFyXQAz9tRI6g,118619 +uvloop/lru.pyx,sha256=nBZ4zuy4XjsdLorq-JhNS7WObcLpZWMr1OjyRvv8FaI,2279 +uvloop/pseudosock.pyx,sha256=M3H7qMGFXE9ZZLvYwOgBl3ZcNA5OKSnZ7NUGLJA7AlA,5383 +uvloop/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +uvloop/request.pxd,sha256=7yx8JlG0Hu2cv_i2QCZ_WdLlsGjI0z5eM_ueOOOgK6w,143 +uvloop/request.pyx,sha256=6-8Dme6LoT88B5-MzvmpuLn3hGt1eZlekvQxG0x2y8s,2259 +uvloop/server.pxd,sha256=_zRDiZMjsmlxJRo0KDzSM0xyfg2k-TzlGln54wvXC-Y,394 +uvloop/server.pyx,sha256=6wC5vUhAHnnUs7qHOJXvRkgov38IeY8xp6w45-rCRFc,3623 +uvloop/sslproto.pxd,sha256=fCM5XWu5ZSTDpf5_-wF2jvj77Y403yk40QOiWc0wo1s,3534 +uvloop/sslproto.pyx,sha256=EL1fckxojYK42OCAIJ-geUoKc0uncPH1hXg50roBQ-0,35381 diff --git a/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/WHEEL b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/WHEEL new file mode 100644 index 0000000..f24abc1 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.1.0) +Root-Is-Purelib: false +Tag: cp312-cp312-macosx_10_13_universal2 + diff --git a/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/top_level.txt b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/top_level.txt new file mode 100644 index 0000000..99d4716 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop-0.21.0.dist-info/top_level.txt @@ -0,0 +1 @@ +uvloop diff --git a/hackaton/lib/python3.12/site-packages/uvloop/__init__.py b/hackaton/lib/python3.12/site-packages/uvloop/__init__.py new file mode 100644 index 0000000..9bb6592 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/__init__.py @@ -0,0 +1,168 @@ +import asyncio as __asyncio +import typing as _typing +import sys as _sys +import warnings as _warnings + +from asyncio.events import BaseDefaultEventLoopPolicy as __BasePolicy + +from . 
import includes as __includes # NOQA +from .loop import Loop as __BaseLoop # NOQA +from ._version import __version__ # NOQA + + +__all__ = ('new_event_loop', 'install', 'EventLoopPolicy') + + +_T = _typing.TypeVar("_T") + + +class Loop(__BaseLoop, __asyncio.AbstractEventLoop): # type: ignore[misc] + pass + + +def new_event_loop() -> Loop: + """Return a new event loop.""" + return Loop() + + +def install() -> None: + """A helper function to install uvloop policy.""" + if _sys.version_info[:2] >= (3, 12): + _warnings.warn( + 'uvloop.install() is deprecated in favor of uvloop.run() ' + 'starting with Python 3.12.', + DeprecationWarning, + stacklevel=1, + ) + __asyncio.set_event_loop_policy(EventLoopPolicy()) + + +if _typing.TYPE_CHECKING: + def run( + main: _typing.Coroutine[_typing.Any, _typing.Any, _T], + *, + loop_factory: _typing.Optional[ + _typing.Callable[[], Loop] + ] = new_event_loop, + debug: _typing.Optional[bool]=None, + ) -> _T: + """The preferred way of running a coroutine with uvloop.""" +else: + def run(main, *, loop_factory=new_event_loop, debug=None, **run_kwargs): + """The preferred way of running a coroutine with uvloop.""" + + async def wrapper(): + # If `loop_factory` is provided we want it to return + # either uvloop.Loop or a subtype of it, assuming the user + # is using `uvloop.run()` intentionally. + loop = __asyncio._get_running_loop() + if not isinstance(loop, Loop): + raise TypeError('uvloop.run() uses a non-uvloop event loop') + return await main + + vi = _sys.version_info[:2] + + if vi <= (3, 10): + # Copied from python/cpython + + if __asyncio._get_running_loop() is not None: + raise RuntimeError( + "asyncio.run() cannot be called from a running event loop") + + if not __asyncio.iscoroutine(main): + raise ValueError( + "a coroutine was expected, got {!r}".format(main) + ) + + loop = loop_factory() + try: + __asyncio.set_event_loop(loop) + if debug is not None: + loop.set_debug(debug) + return loop.run_until_complete(wrapper()) + finally: + try: + _cancel_all_tasks(loop) + loop.run_until_complete(loop.shutdown_asyncgens()) + if hasattr(loop, 'shutdown_default_executor'): + loop.run_until_complete( + loop.shutdown_default_executor() + ) + finally: + __asyncio.set_event_loop(None) + loop.close() + + elif vi == (3, 11): + if __asyncio._get_running_loop() is not None: + raise RuntimeError( + "asyncio.run() cannot be called from a running event loop") + + with __asyncio.Runner( + loop_factory=loop_factory, + debug=debug, + **run_kwargs + ) as runner: + return runner.run(wrapper()) + + else: + assert vi >= (3, 12) + return __asyncio.run( + wrapper(), + loop_factory=loop_factory, + debug=debug, + **run_kwargs + ) + + +def _cancel_all_tasks(loop: __asyncio.AbstractEventLoop) -> None: + # Copied from python/cpython + + to_cancel = __asyncio.all_tasks(loop) + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + + loop.run_until_complete( + __asyncio.gather(*to_cancel, return_exceptions=True) + ) + + for task in to_cancel: + if task.cancelled(): + continue + if task.exception() is not None: + loop.call_exception_handler({ + 'message': 'unhandled exception during asyncio.run() shutdown', + 'exception': task.exception(), + 'task': task, + }) + + +class EventLoopPolicy(__BasePolicy): + """Event loop policy. 
+ + The preferred way to make your application use uvloop: + + >>> import asyncio + >>> import uvloop + >>> asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) + >>> asyncio.get_event_loop() + + """ + + def _loop_factory(self) -> Loop: + return new_event_loop() + + if _typing.TYPE_CHECKING: + # EventLoopPolicy doesn't implement these, but since they are marked + # as abstract in typeshed, we have to put them in so mypy thinks + # the base methods are overridden. This is the same approach taken + # for the Windows event loop policy classes in typeshed. + def get_child_watcher(self) -> _typing.NoReturn: + ... + + def set_child_watcher( + self, watcher: _typing.Any + ) -> _typing.NoReturn: + ... diff --git a/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..6759675 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/_noop.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/_noop.cpython-312.pyc new file mode 100644 index 0000000..53be467 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/_noop.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/_testbase.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/_testbase.cpython-312.pyc new file mode 100644 index 0000000..c812539 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/_testbase.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/_version.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/_version.cpython-312.pyc new file mode 100644 index 0000000..dfe2ce4 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/uvloop/__pycache__/_version.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/uvloop/_noop.py b/hackaton/lib/python3.12/site-packages/uvloop/_noop.py new file mode 100644 index 0000000..bfc14dc --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/_noop.py @@ -0,0 +1,3 @@ +def noop() -> None: + """Empty function to invoke CPython ceval loop.""" + return diff --git a/hackaton/lib/python3.12/site-packages/uvloop/_testbase.py b/hackaton/lib/python3.12/site-packages/uvloop/_testbase.py new file mode 100644 index 0000000..e620e15 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/_testbase.py @@ -0,0 +1,552 @@ +"""Test utilities. 
Don't use outside of the uvloop project.""" + + +import asyncio +import asyncio.events +import collections +import contextlib +import gc +import logging +import os +import pprint +import re +import select +import socket +import ssl +import sys +import tempfile +import threading +import time +import unittest +import uvloop + + +class MockPattern(str): + def __eq__(self, other): + return bool(re.search(str(self), other, re.S)) + + +class TestCaseDict(collections.UserDict): + + def __init__(self, name): + super().__init__() + self.name = name + + def __setitem__(self, key, value): + if key in self.data: + raise RuntimeError('duplicate test {}.{}'.format( + self.name, key)) + super().__setitem__(key, value) + + +class BaseTestCaseMeta(type): + + @classmethod + def __prepare__(mcls, name, bases): + return TestCaseDict(name) + + def __new__(mcls, name, bases, dct): + for test_name in dct: + if not test_name.startswith('test_'): + continue + for base in bases: + if hasattr(base, test_name): + raise RuntimeError( + 'duplicate test {}.{} (also defined in {} ' + 'parent class)'.format( + name, test_name, base.__name__)) + + return super().__new__(mcls, name, bases, dict(dct)) + + +class BaseTestCase(unittest.TestCase, metaclass=BaseTestCaseMeta): + + def new_loop(self): + raise NotImplementedError + + def new_policy(self): + raise NotImplementedError + + def mock_pattern(self, str): + return MockPattern(str) + + async def wait_closed(self, obj): + if not isinstance(obj, asyncio.StreamWriter): + return + try: + await obj.wait_closed() + except (BrokenPipeError, ConnectionError): + pass + + def is_asyncio_loop(self): + return type(self.loop).__module__.startswith('asyncio.') + + def run_loop_briefly(self, *, delay=0.01): + self.loop.run_until_complete(asyncio.sleep(delay)) + + def loop_exception_handler(self, loop, context): + self.__unhandled_exceptions.append(context) + self.loop.default_exception_handler(context) + + def setUp(self): + self.loop = self.new_loop() + asyncio.set_event_loop_policy(self.new_policy()) + asyncio.set_event_loop(self.loop) + self._check_unclosed_resources_in_debug = True + + self.loop.set_exception_handler(self.loop_exception_handler) + self.__unhandled_exceptions = [] + + def tearDown(self): + self.loop.close() + + if self.__unhandled_exceptions: + print('Unexpected calls to loop.call_exception_handler():') + pprint.pprint(self.__unhandled_exceptions) + self.fail('unexpected calls to loop.call_exception_handler()') + return + + if not self._check_unclosed_resources_in_debug: + return + + # GC to show any resource warnings as the test completes + gc.collect() + gc.collect() + gc.collect() + + if getattr(self.loop, '_debug_cc', False): + gc.collect() + gc.collect() + gc.collect() + + self.assertEqual( + self.loop._debug_uv_handles_total, + self.loop._debug_uv_handles_freed, + 'not all uv_handle_t handles were freed') + + self.assertEqual( + self.loop._debug_cb_handles_count, 0, + 'not all callbacks (call_soon) are GCed') + + self.assertEqual( + self.loop._debug_cb_timer_handles_count, 0, + 'not all timer callbacks (call_later) are GCed') + + self.assertEqual( + self.loop._debug_stream_write_ctx_cnt, 0, + 'not all stream write contexts are GCed') + + for h_name, h_cnt in self.loop._debug_handles_current.items(): + with self.subTest('Alive handle after test', + handle_name=h_name): + self.assertEqual( + h_cnt, 0, + 'alive {} after test'.format(h_name)) + + for h_name, h_cnt in self.loop._debug_handles_total.items(): + with self.subTest('Total/closed handles', + 
handle_name=h_name): + self.assertEqual( + h_cnt, self.loop._debug_handles_closed[h_name], + 'total != closed for {}'.format(h_name)) + + asyncio.set_event_loop(None) + asyncio.set_event_loop_policy(None) + self.loop = None + + def skip_unclosed_handles_check(self): + self._check_unclosed_resources_in_debug = False + + def tcp_server(self, server_prog, *, + family=socket.AF_INET, + addr=None, + timeout=5, + backlog=1, + max_clients=10): + + if addr is None: + if family == socket.AF_UNIX: + with tempfile.NamedTemporaryFile() as tmp: + addr = tmp.name + else: + addr = ('127.0.0.1', 0) + + sock = socket.socket(family, socket.SOCK_STREAM) + + if timeout is None: + raise RuntimeError('timeout is required') + if timeout <= 0: + raise RuntimeError('only blocking sockets are supported') + sock.settimeout(timeout) + + try: + sock.bind(addr) + sock.listen(backlog) + except OSError as ex: + sock.close() + raise ex + + return TestThreadedServer( + self, sock, server_prog, timeout, max_clients) + + def tcp_client(self, client_prog, + family=socket.AF_INET, + timeout=10): + + sock = socket.socket(family, socket.SOCK_STREAM) + + if timeout is None: + raise RuntimeError('timeout is required') + if timeout <= 0: + raise RuntimeError('only blocking sockets are supported') + sock.settimeout(timeout) + + return TestThreadedClient( + self, sock, client_prog, timeout) + + def unix_server(self, *args, **kwargs): + return self.tcp_server(*args, family=socket.AF_UNIX, **kwargs) + + def unix_client(self, *args, **kwargs): + return self.tcp_client(*args, family=socket.AF_UNIX, **kwargs) + + @contextlib.contextmanager + def unix_sock_name(self): + with tempfile.TemporaryDirectory() as td: + fn = os.path.join(td, 'sock') + try: + yield fn + finally: + try: + os.unlink(fn) + except OSError: + pass + + def _abort_socket_test(self, ex): + try: + self.loop.stop() + finally: + self.fail(ex) + + +def _cert_fullname(test_file_name, cert_file_name): + fullname = os.path.abspath(os.path.join( + os.path.dirname(test_file_name), 'certs', cert_file_name)) + assert os.path.isfile(fullname) + return fullname + + +@contextlib.contextmanager +def silence_long_exec_warning(): + + class Filter(logging.Filter): + def filter(self, record): + return not (record.msg.startswith('Executing') and + record.msg.endswith('seconds')) + + logger = logging.getLogger('asyncio') + filter = Filter() + logger.addFilter(filter) + try: + yield + finally: + logger.removeFilter(filter) + + +def find_free_port(start_from=50000): + for port in range(start_from, start_from + 500): + sock = socket.socket() + with sock: + try: + sock.bind(('', port)) + except socket.error: + continue + else: + return port + raise RuntimeError('could not find a free port') + + +class SSLTestCase: + + def _create_server_ssl_context(self, certfile, keyfile=None): + if hasattr(ssl, 'PROTOCOL_TLS_SERVER'): + sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) + elif hasattr(ssl, 'PROTOCOL_TLS'): + sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS) + else: + sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + sslcontext.options |= ssl.OP_NO_SSLv2 + sslcontext.load_cert_chain(certfile, keyfile) + return sslcontext + + def _create_client_ssl_context(self, *, disable_verify=True): + sslcontext = ssl.create_default_context() + sslcontext.check_hostname = False + if disable_verify: + sslcontext.verify_mode = ssl.CERT_NONE + return sslcontext + + @contextlib.contextmanager + def _silence_eof_received_warning(self): + # TODO This warning has to be fixed in asyncio. 
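+        # Illustrative note (not part of uvloop): logging.Filter(name)
+        # matches by *logger name*, so the name-based filter below rejects
+        # every record of the 'asyncio' logger while attached, which
+        # incidentally hides the "...has no effect when using ssl" warning.
+        # A narrower, message-based variant (hypothetical _MsgFilter) could
+        # look like:
+        #
+        #     class _MsgFilter(logging.Filter):
+        #         def filter(self, record):
+        #             return ('has no effect when using ssl'
+        #                     not in record.getMessage())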
+ logger = logging.getLogger('asyncio') + filter = logging.Filter('has no effect when using ssl') + logger.addFilter(filter) + try: + yield + finally: + logger.removeFilter(filter) + + +class UVTestCase(BaseTestCase): + + implementation = 'uvloop' + + def new_loop(self): + return uvloop.new_event_loop() + + def new_policy(self): + return uvloop.EventLoopPolicy() + + +class AIOTestCase(BaseTestCase): + + implementation = 'asyncio' + + def setUp(self): + super().setUp() + + if sys.version_info < (3, 12): + watcher = asyncio.SafeChildWatcher() + watcher.attach_loop(self.loop) + asyncio.set_child_watcher(watcher) + + def tearDown(self): + if sys.version_info < (3, 12): + asyncio.set_child_watcher(None) + super().tearDown() + + def new_loop(self): + return asyncio.new_event_loop() + + def new_policy(self): + return asyncio.DefaultEventLoopPolicy() + + +def has_IPv6(): + server_sock = socket.socket(socket.AF_INET6) + with server_sock: + try: + server_sock.bind(('::1', 0)) + except OSError: + return False + else: + return True + + +has_IPv6 = has_IPv6() + + +############################################################################### +# Socket Testing Utilities +############################################################################### + + +class TestSocketWrapper: + + def __init__(self, sock): + self.__sock = sock + + def recv_all(self, n): + buf = b'' + while len(buf) < n: + data = self.recv(n - len(buf)) + if data == b'': + raise ConnectionAbortedError + buf += data + return buf + + def starttls(self, ssl_context, *, + server_side=False, + server_hostname=None, + do_handshake_on_connect=True): + + assert isinstance(ssl_context, ssl.SSLContext) + + ssl_sock = ssl_context.wrap_socket( + self.__sock, server_side=server_side, + server_hostname=server_hostname, + do_handshake_on_connect=do_handshake_on_connect) + + if server_side: + ssl_sock.do_handshake() + + self.__sock.close() + self.__sock = ssl_sock + + def __getattr__(self, name): + return getattr(self.__sock, name) + + def __repr__(self): + return '<{} {!r}>'.format(type(self).__name__, self.__sock) + + +class SocketThread(threading.Thread): + + def stop(self): + self._active = False + self.join() + + def __enter__(self): + self.start() + return self + + def __exit__(self, *exc): + self.stop() + + +class TestThreadedClient(SocketThread): + + def __init__(self, test, sock, prog, timeout): + threading.Thread.__init__(self, None, None, 'test-client') + self.daemon = True + + self._timeout = timeout + self._sock = sock + self._active = True + self._prog = prog + self._test = test + + def run(self): + try: + self._prog(TestSocketWrapper(self._sock)) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + self._test._abort_socket_test(ex) + + +class TestThreadedServer(SocketThread): + + def __init__(self, test, sock, prog, timeout, max_clients): + threading.Thread.__init__(self, None, None, 'test-server') + self.daemon = True + + self._clients = 0 + self._finished_clients = 0 + self._max_clients = max_clients + self._timeout = timeout + self._sock = sock + self._active = True + + self._prog = prog + + self._s1, self._s2 = socket.socketpair() + self._s1.setblocking(False) + + self._test = test + + def stop(self): + try: + if self._s2 and self._s2.fileno() != -1: + try: + self._s2.send(b'stop') + except OSError: + pass + finally: + super().stop() + + def run(self): + try: + with self._sock: + self._sock.setblocking(0) + self._run() + finally: + self._s1.close() + self._s2.close() + + def _run(self): + while 
self._active: + if self._clients >= self._max_clients: + return + + r, w, x = select.select( + [self._sock, self._s1], [], [], self._timeout) + + if self._s1 in r: + return + + if self._sock in r: + try: + conn, addr = self._sock.accept() + except BlockingIOError: + continue + except socket.timeout: + if not self._active: + return + else: + raise + else: + self._clients += 1 + conn.settimeout(self._timeout) + try: + with conn: + self._handle_client(conn) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + self._active = False + try: + raise + finally: + self._test._abort_socket_test(ex) + + def _handle_client(self, sock): + self._prog(TestSocketWrapper(sock)) + + @property + def addr(self): + return self._sock.getsockname() + + +############################################################################### +# A few helpers from asyncio/tests/testutils.py +############################################################################### + + +def run_briefly(loop): + async def once(): + pass + gen = once() + t = loop.create_task(gen) + # Don't log a warning if the task is not done after run_until_complete(). + # It occurs if the loop is stopped or if a task raises a BaseException. + t._log_destroy_pending = False + try: + loop.run_until_complete(t) + finally: + gen.close() + + +def run_until(loop, pred, timeout=30): + deadline = time.time() + timeout + while not pred(): + if timeout is not None: + timeout = deadline - time.time() + if timeout <= 0: + raise asyncio.futures.TimeoutError() + loop.run_until_complete(asyncio.tasks.sleep(0.001)) + + +@contextlib.contextmanager +def disable_logger(): + """Context manager to disable asyncio logger. + + For example, it can be used to ignore warnings in debug mode. + """ + old_level = asyncio.log.logger.level + try: + asyncio.log.logger.setLevel(logging.CRITICAL + 1) + yield + finally: + asyncio.log.logger.setLevel(old_level) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/_version.py b/hackaton/lib/python3.12/site-packages/uvloop/_version.py new file mode 100644 index 0000000..9e1722c --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/_version.py @@ -0,0 +1,13 @@ +# This file MUST NOT contain anything but the __version__ assignment. +# +# When making a release, change the value of __version__ +# to an appropriate value, and open a pull request against +# the correct branch (master if making a new feature release). +# The commit message MUST contain a properly formatted release +# log, and the commit must be signed. +# +# The release automation will: build and test the packages for the +# supported platforms, publish the packages on PyPI, merge the PR +# to the target branch, create a Git tag pointing to the commit. 
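+# Illustrative note (not part of the release checklist above): the value
+# assigned below is importable at runtime, re-exported by
+# uvloop/__init__.py via `from ._version import __version__`:
+#
+#     import uvloop
+#     print(uvloop.__version__)   # e.g. '0.21.0'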
+ +__version__ = '0.21.0' diff --git a/hackaton/lib/python3.12/site-packages/uvloop/cbhandles.pxd b/hackaton/lib/python3.12/site-packages/uvloop/cbhandles.pxd new file mode 100644 index 0000000..e594b13 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/cbhandles.pxd @@ -0,0 +1,39 @@ +cdef class Handle: + cdef: + Loop loop + object context + bint _cancelled + + str meth_name + int cb_type + void *callback + object arg1, arg2, arg3, arg4 + + object __weakref__ + + readonly _source_traceback + + cdef inline _set_loop(self, Loop loop) + cdef inline _set_context(self, object context) + + cdef inline _run(self) + cdef _cancel(self) + + cdef _format_handle(self) + + +cdef class TimerHandle: + cdef: + object callback + tuple args + bint _cancelled + UVTimer timer + Loop loop + object context + tuple _debug_info + object __weakref__ + object _when + + cdef _run(self) + cdef _cancel(self) + cdef inline _clear(self) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/cbhandles.pyx b/hackaton/lib/python3.12/site-packages/uvloop/cbhandles.pyx new file mode 100644 index 0000000..2914b42 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/cbhandles.pyx @@ -0,0 +1,434 @@ +@cython.no_gc_clear +@cython.freelist(DEFAULT_FREELIST_SIZE) +cdef class Handle: + def __cinit__(self): + self._cancelled = 0 + self.cb_type = 0 + self._source_traceback = None + + cdef inline _set_loop(self, Loop loop): + self.loop = loop + if UVLOOP_DEBUG: + loop._debug_cb_handles_total += 1 + loop._debug_cb_handles_count += 1 + if loop._debug: + self._source_traceback = extract_stack() + + cdef inline _set_context(self, object context): + if context is None: + context = Context_CopyCurrent() + self.context = context + + def __dealloc__(self): + if UVLOOP_DEBUG and self.loop is not None: + self.loop._debug_cb_handles_count -= 1 + if self.loop is None: + raise RuntimeError('Handle.loop is None in Handle.__dealloc__') + + def __init__(self): + raise TypeError( + '{} is not supposed to be instantiated from Python'.format( + self.__class__.__name__)) + + cdef inline _run(self): + cdef: + int cb_type + object callback + + if self._cancelled: + return + + cb_type = self.cb_type + + # Since _run is a cdef and there's no BoundMethod, + # we guard 'self' manually (since the callback + # might cause GC of the handle.) 
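+        # Illustrative pure-Python analogy of the INCREF/DECREF guard used
+        # below (a sketch, not uvloop code): hold a strong reference for
+        # the duration of the call so the handle cannot be collected while
+        # its own callback runs --
+        #
+        #     ref = self            # Py_INCREF(self)
+        #     try:
+        #         callback(*args)   # may drop the last external reference
+        #     finally:
+        #         del ref           # Py_DECREF(self)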
+ Py_INCREF(self) + + try: + assert self.context is not None + Context_Enter(self.context) + + if cb_type == 1: + callback = self.arg1 + if callback is None: + raise RuntimeError( + 'cannot run Handle; callback is not set') + + args = self.arg2 + + if args is None: + callback() + else: + callback(*args) + + elif cb_type == 2: + (self.callback)(self.arg1) + + elif cb_type == 3: + (self.callback)(self.arg1, self.arg2) + + elif cb_type == 4: + (self.callback)(self.arg1, self.arg2, self.arg3) + + elif cb_type == 5: + (self.callback)( + self.arg1, self.arg2, self.arg3, self.arg4) + + else: + raise RuntimeError('invalid Handle.cb_type: {}'.format( + cb_type)) + + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + if cb_type == 1: + msg = 'Exception in callback {}'.format(callback) + else: + msg = 'Exception in callback {}'.format(self.meth_name) + + context = { + 'message': msg, + 'exception': ex, + 'handle': self, + } + + if self._source_traceback is not None: + context['source_traceback'] = self._source_traceback + + self.loop.call_exception_handler(context) + + finally: + context = self.context + Py_DECREF(self) + Context_Exit(context) + + cdef _cancel(self): + self._cancelled = 1 + self.callback = NULL + self.arg1 = self.arg2 = self.arg3 = self.arg4 = None + + cdef _format_handle(self): + # Mirrors `asyncio.base_events._format_handle`. + if self.cb_type == 1 and self.arg1 is not None: + cb = self.arg1 + if isinstance(getattr(cb, '__self__', None), aio_Task): + try: + return repr(cb.__self__) + except (AttributeError, TypeError, ValueError) as ex: + # Cython generates empty __code__ objects for coroutines + # that can crash asyncio.Task.__repr__ with an + # AttributeError etc. Guard against that. + self.loop.call_exception_handler({ + 'message': 'exception in Task.__repr__', + 'task': cb.__self__, + 'exception': ex, + 'handle': self, + }) + return repr(self) + + # Public API + + def __repr__(self): + info = [self.__class__.__name__] + + if self._cancelled: + info.append('cancelled') + + if self.cb_type == 1 and self.arg1 is not None: + func = self.arg1 + # Cython can unset func.__qualname__/__name__, hence the checks. 
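+            # Illustrative note (example assumes a functools.partial
+            # callback, which defines neither __qualname__ nor __name__):
+            # the chain below then falls through to repr(func), producing
+            # something like
+            #
+            #     <Handle functools.partial(<function cb at 0x...>, 1)>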
+ if hasattr(func, '__qualname__') and func.__qualname__: + cb_name = func.__qualname__ + elif hasattr(func, '__name__') and func.__name__: + cb_name = func.__name__ + else: + cb_name = repr(func) + + info.append(cb_name) + elif self.meth_name is not None: + info.append(self.meth_name) + + if self._source_traceback is not None: + frame = self._source_traceback[-1] + info.append('created at {}:{}'.format(frame[0], frame[1])) + + return '<' + ' '.join(info) + '>' + + def cancel(self): + self._cancel() + + def cancelled(self): + return self._cancelled + + +@cython.no_gc_clear +@cython.freelist(DEFAULT_FREELIST_SIZE) +cdef class TimerHandle: + def __cinit__(self, Loop loop, object callback, object args, + uint64_t delay, object context): + + self.loop = loop + self.callback = callback + self.args = args + self._cancelled = 0 + + if UVLOOP_DEBUG: + self.loop._debug_cb_timer_handles_total += 1 + self.loop._debug_cb_timer_handles_count += 1 + + if context is None: + context = Context_CopyCurrent() + self.context = context + + if loop._debug: + self._debug_info = ( + format_callback_name(callback), + extract_stack() + ) + else: + self._debug_info = None + + self.timer = UVTimer.new( + loop, self._run, self, delay) + + self.timer.start() + self._when = self.timer.get_when() * 1e-3 + + # Only add to loop._timers when `self.timer` is successfully created + loop._timers.add(self) + + property _source_traceback: + def __get__(self): + if self._debug_info is not None: + return self._debug_info[1] + + def __dealloc__(self): + if UVLOOP_DEBUG: + self.loop._debug_cb_timer_handles_count -= 1 + if self.timer is not None: + raise RuntimeError('active TimerHandle is deallacating') + + cdef _cancel(self): + if self._cancelled == 1: + return + self._cancelled = 1 + self._clear() + + cdef inline _clear(self): + if self.timer is None: + return + + self.callback = None + self.args = None + + try: + self.loop._timers.remove(self) + finally: + self.timer._close() + self.timer = None # let the UVTimer handle GC + + cdef _run(self): + if self._cancelled == 1: + return + if self.callback is None: + raise RuntimeError('cannot run TimerHandle; callback is not set') + + callback = self.callback + args = self.args + + # Since _run is a cdef and there's no BoundMethod, + # we guard 'self' manually. 
+ Py_INCREF(self) + + if self.loop._debug: + started = time_monotonic() + try: + assert self.context is not None + Context_Enter(self.context) + + if args is not None: + callback(*args) + else: + callback() + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + context = { + 'message': 'Exception in callback {}'.format(callback), + 'exception': ex, + 'handle': self, + } + + if self._debug_info is not None: + context['source_traceback'] = self._debug_info[1] + + self.loop.call_exception_handler(context) + else: + if self.loop._debug: + delta = time_monotonic() - started + if delta > self.loop.slow_callback_duration: + aio_logger.warning( + 'Executing %r took %.3f seconds', + self, delta) + finally: + context = self.context + Py_DECREF(self) + Context_Exit(context) + self._clear() + + # Public API + + def __repr__(self): + info = [self.__class__.__name__] + + if self._cancelled: + info.append('cancelled') + + if self._debug_info is not None: + callback_name = self._debug_info[0] + source_traceback = self._debug_info[1] + else: + callback_name = None + source_traceback = None + + if callback_name is not None: + info.append(callback_name) + elif self.callback is not None: + info.append(format_callback_name(self.callback)) + + if source_traceback is not None: + frame = source_traceback[-1] + info.append('created at {}:{}'.format(frame[0], frame[1])) + + return '<' + ' '.join(info) + '>' + + def cancelled(self): + return self._cancelled + + def cancel(self): + self._cancel() + + def when(self): + return self._when + + +cdef format_callback_name(func): + if hasattr(func, '__qualname__'): + cb_name = getattr(func, '__qualname__') + elif hasattr(func, '__name__'): + cb_name = getattr(func, '__name__') + else: + cb_name = repr(func) + return cb_name + + +cdef new_Handle(Loop loop, object callback, object args, object context): + cdef Handle handle + handle = Handle.__new__(Handle) + handle._set_loop(loop) + handle._set_context(context) + + handle.cb_type = 1 + + handle.arg1 = callback + handle.arg2 = args + + return handle + + +cdef new_MethodHandle(Loop loop, str name, method_t callback, object context, + object bound_to): + cdef Handle handle + handle = Handle.__new__(Handle) + handle._set_loop(loop) + handle._set_context(context) + + handle.cb_type = 2 + handle.meth_name = name + + handle.callback = callback + handle.arg1 = bound_to + + return handle + + +cdef new_MethodHandle1(Loop loop, str name, method1_t callback, object context, + object bound_to, object arg): + + cdef Handle handle + handle = Handle.__new__(Handle) + handle._set_loop(loop) + handle._set_context(context) + + handle.cb_type = 3 + handle.meth_name = name + + handle.callback = callback + handle.arg1 = bound_to + handle.arg2 = arg + + return handle + + +cdef new_MethodHandle2(Loop loop, str name, method2_t callback, object context, + object bound_to, object arg1, object arg2): + + cdef Handle handle + handle = Handle.__new__(Handle) + handle._set_loop(loop) + handle._set_context(context) + + handle.cb_type = 4 + handle.meth_name = name + + handle.callback = callback + handle.arg1 = bound_to + handle.arg2 = arg1 + handle.arg3 = arg2 + + return handle + + +cdef new_MethodHandle3(Loop loop, str name, method3_t callback, object context, + object bound_to, object arg1, object arg2, object arg3): + + cdef Handle handle + handle = Handle.__new__(Handle) + handle._set_loop(loop) + handle._set_context(context) + + handle.cb_type = 5 + handle.meth_name = name + + handle.callback = callback + handle.arg1 = 
bound_to + handle.arg2 = arg1 + handle.arg3 = arg2 + handle.arg4 = arg3 + + return handle + + +cdef extract_stack(): + """Replacement for traceback.extract_stack() that only does the + necessary work for asyncio debug mode. + """ + try: + f = sys_getframe() + # sys._getframe() might raise ValueError if being called without a frame, e.g. + # from Cython or similar C extensions. + except ValueError: + return None + if f is None: + return + + try: + stack = tb_StackSummary.extract(tb_walk_stack(f), + limit=DEBUG_STACK_DEPTH, + lookup_lines=False) + finally: + f = None + + stack.reverse() + return stack diff --git a/hackaton/lib/python3.12/site-packages/uvloop/dns.pyx b/hackaton/lib/python3.12/site-packages/uvloop/dns.pyx new file mode 100644 index 0000000..67aeb59 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/dns.pyx @@ -0,0 +1,479 @@ +cdef __port_to_int(port, proto): + if type(port) is int: + return port + + if port is None or port == '' or port == b'': + return 0 + + try: + return int(port) + except (ValueError, TypeError): + pass + + if isinstance(port, bytes): + port = port.decode() + + if isinstance(port, str) and proto is not None: + if proto == uv.IPPROTO_TCP: + return socket_getservbyname(port, 'tcp') + elif proto == uv.IPPROTO_UDP: + return socket_getservbyname(port, 'udp') + + raise OSError('service/proto not found') + + +cdef __convert_sockaddr_to_pyaddr(const system.sockaddr* addr): + # Converts sockaddr structs into what Python socket + # module can understand: + # - for IPv4 a tuple of (host, port) + # - for IPv6 a tuple of (host, port, flowinfo, scope_id) + + cdef: + char buf[128] # INET6_ADDRSTRLEN is usually 46 + int err + system.sockaddr_in *addr4 + system.sockaddr_in6 *addr6 + system.sockaddr_un *addr_un + + if addr.sa_family == uv.AF_INET: + addr4 = addr + + err = uv.uv_ip4_name(addr4, buf, sizeof(buf)) + if err < 0: + raise convert_error(err) + + return ( + PyUnicode_FromString(buf), + system.ntohs(addr4.sin_port) + ) + + elif addr.sa_family == uv.AF_INET6: + addr6 = addr + + err = uv.uv_ip6_name(addr6, buf, sizeof(buf)) + if err < 0: + raise convert_error(err) + + return ( + PyUnicode_FromString(buf), + system.ntohs(addr6.sin6_port), + system.ntohl(addr6.sin6_flowinfo), + addr6.sin6_scope_id + ) + + elif addr.sa_family == uv.AF_UNIX: + addr_un = addr + return system.MakeUnixSockPyAddr(addr_un) + + raise RuntimeError("cannot convert sockaddr into Python object") + + +@cython.freelist(DEFAULT_FREELIST_SIZE) +cdef class SockAddrHolder: + cdef: + int family + system.sockaddr_storage addr + Py_ssize_t addr_size + + +cdef LruCache sockaddrs = LruCache(maxsize=DNS_PYADDR_TO_SOCKADDR_CACHE_SIZE) + + +cdef __convert_pyaddr_to_sockaddr(int family, object addr, + system.sockaddr* res): + cdef: + int err + int addr_len + int scope_id = 0 + int flowinfo = 0 + char *buf + Py_ssize_t buflen + SockAddrHolder ret + + ret = sockaddrs.get(addr, None) + if ret is not None and ret.family == family: + memcpy(res, &ret.addr, ret.addr_size) + return + + ret = SockAddrHolder.__new__(SockAddrHolder) + if family == uv.AF_INET: + if not isinstance(addr, tuple): + raise TypeError('AF_INET address must be tuple') + if len(addr) != 2: + raise ValueError('AF_INET address must be tuple of (host, port)') + host, port = addr + if isinstance(host, str): + try: + # idna codec is rather slow, so we try ascii first. 
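+                # Illustrative sketch of the two-step encode implemented
+                # below:
+                #
+                #     'example.com'.encode('ascii')  # fast path, ASCII host
+                #     'bücher.de'.encode('ascii')    # UnicodeEncodeError
+                #     'bücher.de'.encode('idna')     # -> b'xn--bcher-kva.de'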
+ host = host.encode('ascii') + except UnicodeEncodeError: + host = host.encode('idna') + if not isinstance(host, (bytes, bytearray)): + raise TypeError('host must be a string or bytes object') + + port = __port_to_int(port, None) + + ret.addr_size = sizeof(system.sockaddr_in) + err = uv.uv_ip4_addr(host, port, &ret.addr) + if err < 0: + raise convert_error(err) + + elif family == uv.AF_INET6: + if not isinstance(addr, tuple): + raise TypeError('AF_INET6 address must be tuple') + + addr_len = len(addr) + if addr_len < 2 or addr_len > 4: + raise ValueError( + 'AF_INET6 must be a tuple of 2-4 parameters: ' + '(host, port, flowinfo?, scope_id?)') + + host = addr[0] + if isinstance(host, str): + try: + # idna codec is rather slow, so we try ascii first. + host = host.encode('ascii') + except UnicodeEncodeError: + host = host.encode('idna') + if not isinstance(host, (bytes, bytearray)): + raise TypeError('host must be a string or bytes object') + + port = __port_to_int(addr[1], None) + + if addr_len > 2: + flowinfo = addr[2] + if addr_len > 3: + scope_id = addr[3] + + ret.addr_size = sizeof(system.sockaddr_in6) + + err = uv.uv_ip6_addr(host, port, &ret.addr) + if err < 0: + raise convert_error(err) + + (&ret.addr).sin6_flowinfo = flowinfo + (&ret.addr).sin6_scope_id = scope_id + + elif family == uv.AF_UNIX: + if isinstance(addr, str): + addr = addr.encode(sys_getfilesystemencoding()) + elif not isinstance(addr, bytes): + raise TypeError('AF_UNIX address must be a str or a bytes object') + + PyBytes_AsStringAndSize(addr, &buf, &buflen) + if buflen > 107: + raise ValueError( + f'unix socket path {addr!r} is longer than 107 characters') + + ret.addr_size = sizeof(system.sockaddr_un) + memset(&ret.addr, 0, sizeof(system.sockaddr_un)) + (&ret.addr).sun_family = uv.AF_UNIX + memcpy((&ret.addr).sun_path, buf, buflen) + + else: + raise ValueError( + f'expected AF_INET, AF_INET6, or AF_UNIX family, got {family}') + + ret.family = family + sockaddrs[addr] = ret + memcpy(res, &ret.addr, ret.addr_size) + + +cdef __static_getaddrinfo(object host, object port, + int family, int type, + int proto, + system.sockaddr *addr): + + if proto not in {0, uv.IPPROTO_TCP, uv.IPPROTO_UDP}: + return + + if _is_sock_stream(type): + proto = uv.IPPROTO_TCP + elif _is_sock_dgram(type): + proto = uv.IPPROTO_UDP + else: + return + + try: + port = __port_to_int(port, proto) + except Exception: + return + + hp = (host, port) + if family == uv.AF_UNSPEC: + try: + __convert_pyaddr_to_sockaddr(uv.AF_INET, hp, addr) + except Exception: + pass + else: + return (uv.AF_INET, type, proto) + + try: + __convert_pyaddr_to_sockaddr(uv.AF_INET6, hp, addr) + except Exception: + pass + else: + return (uv.AF_INET6, type, proto) + + else: + try: + __convert_pyaddr_to_sockaddr(family, hp, addr) + except Exception: + pass + else: + return (family, type, proto) + + +cdef __static_getaddrinfo_pyaddr(object host, object port, + int family, int type, + int proto, int flags): + + cdef: + system.sockaddr_storage addr + object triplet + + triplet = __static_getaddrinfo( + host, port, family, type, + proto, &addr) + if triplet is None: + return + + af, type, proto = triplet + + try: + pyaddr = __convert_sockaddr_to_pyaddr(&addr) + except Exception: + return + + # When the host is an IP while type is one of TCP or UDP, different libc + # implementations of getaddrinfo() behave differently: + # 1. When AI_CANONNAME is set: + # * glibc: returns ai_canonname + # * musl: returns ai_canonname + # * macOS: returns an empty string for ai_canonname + # 2. 
When AI_CANONNAME is NOT set: + # * glibc: returns an empty string for ai_canonname + # * musl: returns ai_canonname + # * macOS: returns an empty string for ai_canonname + # At the same time, libuv and CPython both uses libc directly, even though + # this different behavior is violating what is in the documentation. + # + # uvloop potentially should be a 100% drop-in replacement for asyncio, + # doing whatever asyncio does, especially when the libc implementations are + # also different in the same way. However, making our implementation to be + # consistent with libc/CPython would be complex and hard to maintain + # (including caching libc behaviors when flag is/not set), therefore we + # decided to simply normalize the behavior in uvloop for this very marginal + # case following the documentation, even though uvloop would behave + # differently to asyncio on macOS and musl platforms, when again the host + # is an IP and type is one of TCP or UDP. + # All other cases are still asyncio-compatible. + if flags & socket_AI_CANONNAME: + if isinstance(host, str): + canon_name = host + else: + canon_name = host.decode('ascii') + else: + canon_name = '' + + return ( + _intenum_converter(af, socket_AddressFamily), + _intenum_converter(type, socket_SocketKind), + proto, + canon_name, + pyaddr, + ) + + +@cython.freelist(DEFAULT_FREELIST_SIZE) +cdef class AddrInfo: + cdef: + system.addrinfo *data + + def __cinit__(self): + self.data = NULL + + def __dealloc__(self): + if self.data is not NULL: + uv.uv_freeaddrinfo(self.data) # returns void + self.data = NULL + + cdef void set_data(self, system.addrinfo *data) noexcept: + self.data = data + + cdef unpack(self): + cdef: + list result = [] + system.addrinfo *ptr + + if self.data is NULL: + raise RuntimeError('AddrInfo.data is NULL') + + ptr = self.data + while ptr != NULL: + if ptr.ai_addr.sa_family in (uv.AF_INET, uv.AF_INET6): + result.append(( + _intenum_converter(ptr.ai_family, socket_AddressFamily), + _intenum_converter(ptr.ai_socktype, socket_SocketKind), + ptr.ai_protocol, + ('' if ptr.ai_canonname is NULL else + (ptr.ai_canonname).decode()), + __convert_sockaddr_to_pyaddr(ptr.ai_addr) + )) + + ptr = ptr.ai_next + + return result + + @staticmethod + cdef int isinstance(object other): + return type(other) is AddrInfo + + +cdef class AddrInfoRequest(UVRequest): + cdef: + system.addrinfo hints + object callback + uv.uv_getaddrinfo_t _req_data + + def __cinit__(self, Loop loop, + bytes host, bytes port, + int family, int type, int proto, int flags, + object callback): + + cdef: + int err + char *chost + char *cport + + if host is None: + chost = NULL + elif host == b'' and sys.platform == 'darwin': + # It seems `getaddrinfo("", ...)` on macOS is equivalent to + # `getaddrinfo("localhost", ...)`. This is inconsistent with + # libuv 1.48 which treats empty nodename as EINVAL. 
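+            # Illustrative note: with the substitution below,
+            #
+            #     await loop.getaddrinfo('', 80)
+            #
+            # keeps behaving like getaddrinfo('localhost', 80) on macOS,
+            # matching the libc behavior described above instead of
+            # libuv 1.48's EINVAL.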
+ chost = 'localhost' + else: + chost = host + + if port is None: + cport = NULL + else: + cport = port + + memset(&self.hints, 0, sizeof(system.addrinfo)) + self.hints.ai_flags = flags + self.hints.ai_family = family + self.hints.ai_socktype = type + self.hints.ai_protocol = proto + + self.request = &self._req_data + self.callback = callback + self.request.data = self + + err = uv.uv_getaddrinfo(loop.uvloop, + self.request, + __on_addrinfo_resolved, + chost, + cport, + &self.hints) + + if err < 0: + self.on_done() + try: + if err == uv.UV_EINVAL: + # Convert UV_EINVAL to EAI_NONAME to match libc behavior + msg = system.gai_strerror(socket_EAI_NONAME).decode('utf-8') + ex = socket_gaierror(socket_EAI_NONAME, msg) + else: + ex = convert_error(err) + except Exception as ex: + callback(ex) + else: + callback(ex) + + +cdef class NameInfoRequest(UVRequest): + cdef: + object callback + uv.uv_getnameinfo_t _req_data + + def __cinit__(self, Loop loop, callback): + self.request = &self._req_data + self.callback = callback + self.request.data = self + + cdef query(self, system.sockaddr *addr, int flags): + cdef int err + err = uv.uv_getnameinfo(self.loop.uvloop, + self.request, + __on_nameinfo_resolved, + addr, + flags) + if err < 0: + self.on_done() + self.callback(convert_error(err)) + + +cdef _intenum_converter(value, enum_klass): + try: + return enum_klass(value) + except ValueError: + return value + + +cdef void __on_addrinfo_resolved( + uv.uv_getaddrinfo_t *resolver, + int status, + system.addrinfo *res, +) noexcept with gil: + + if resolver.data is NULL: + aio_logger.error( + 'AddrInfoRequest callback called with NULL resolver.data') + return + + cdef: + AddrInfoRequest request = resolver.data + Loop loop = request.loop + object callback = request.callback + AddrInfo ai + + try: + if status < 0: + callback(convert_error(status)) + else: + ai = AddrInfo() + ai.set_data(res) + callback(ai) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + loop._handle_exception(ex) + finally: + request.on_done() + + +cdef void __on_nameinfo_resolved( + uv.uv_getnameinfo_t* req, + int status, + const char* hostname, + const char* service, +) noexcept with gil: + cdef: + NameInfoRequest request = req.data + Loop loop = request.loop + object callback = request.callback + + try: + if status < 0: + callback(convert_error(status)) + else: + callback(((hostname).decode(), + (service).decode())) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + loop._handle_exception(ex) + finally: + request.on_done() diff --git a/hackaton/lib/python3.12/site-packages/uvloop/errors.pyx b/hackaton/lib/python3.12/site-packages/uvloop/errors.pyx new file mode 100644 index 0000000..d810d65 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/errors.pyx @@ -0,0 +1,113 @@ +cdef str __strerr(int errno): + return strerror(errno).decode() + + +cdef __convert_python_error(int uverr): + # XXX Won't work for Windows: + # From libuv docs: + # Implementation detail: on Unix error codes are the + # negated errno (or -errno), while on Windows they + # are defined by libuv to arbitrary negative numbers. 
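+    # Illustrative sketch of the Unix case (assumes POSIX errno values):
+    #
+    #     import errno, os
+    #     uverr = -errno.ECONNREFUSED           # as reported by libuv
+    #     oserr = -uverr                        # back to a positive errno
+    #     ConnectionRefusedError(oserr, os.strerror(oserr))
+    #
+    # which is the shape the mapping below constructs.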
+ cdef int oserr = -uverr + + exc = OSError + + if uverr in (uv.UV_EACCES, uv.UV_EPERM): + exc = PermissionError + + elif uverr in (uv.UV_EAGAIN, uv.UV_EALREADY): + exc = BlockingIOError + + elif uverr in (uv.UV_EPIPE, uv.UV_ESHUTDOWN): + exc = BrokenPipeError + + elif uverr == uv.UV_ECONNABORTED: + exc = ConnectionAbortedError + + elif uverr == uv.UV_ECONNREFUSED: + exc = ConnectionRefusedError + + elif uverr == uv.UV_ECONNRESET: + exc = ConnectionResetError + + elif uverr == uv.UV_EEXIST: + exc = FileExistsError + + elif uverr == uv.UV_ENOENT: + exc = FileNotFoundError + + elif uverr == uv.UV_EINTR: + exc = InterruptedError + + elif uverr == uv.UV_EISDIR: + exc = IsADirectoryError + + elif uverr == uv.UV_ESRCH: + exc = ProcessLookupError + + elif uverr == uv.UV_ETIMEDOUT: + exc = TimeoutError + + return exc(oserr, __strerr(oserr)) + + +cdef int __convert_socket_error(int uverr): + cdef int sock_err = 0 + + if uverr == uv.UV_EAI_ADDRFAMILY: + sock_err = socket_EAI_ADDRFAMILY + + elif uverr == uv.UV_EAI_AGAIN: + sock_err = socket_EAI_AGAIN + + elif uverr == uv.UV_EAI_BADFLAGS: + sock_err = socket_EAI_BADFLAGS + + elif uverr == uv.UV_EAI_BADHINTS: + sock_err = socket_EAI_BADHINTS + + elif uverr == uv.UV_EAI_CANCELED: + sock_err = socket_EAI_CANCELED + + elif uverr == uv.UV_EAI_FAIL: + sock_err = socket_EAI_FAIL + + elif uverr == uv.UV_EAI_FAMILY: + sock_err = socket_EAI_FAMILY + + elif uverr == uv.UV_EAI_MEMORY: + sock_err = socket_EAI_MEMORY + + elif uverr == uv.UV_EAI_NODATA: + sock_err = socket_EAI_NODATA + + elif uverr == uv.UV_EAI_NONAME: + sock_err = socket_EAI_NONAME + + elif uverr == uv.UV_EAI_OVERFLOW: + sock_err = socket_EAI_OVERFLOW + + elif uverr == uv.UV_EAI_PROTOCOL: + sock_err = socket_EAI_PROTOCOL + + elif uverr == uv.UV_EAI_SERVICE: + sock_err = socket_EAI_SERVICE + + elif uverr == uv.UV_EAI_SOCKTYPE: + sock_err = socket_EAI_SOCKTYPE + + return sock_err + + +cdef convert_error(int uverr): + cdef int sock_err + + if uverr == uv.UV_ECANCELED: + return aio_CancelledError() + + sock_err = __convert_socket_error(uverr) + if sock_err: + msg = system.gai_strerror(sock_err).decode('utf-8') + return socket_gaierror(sock_err, msg) + + return __convert_python_error(uverr) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/async_.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/async_.pxd new file mode 100644 index 0000000..5f0d820 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/async_.pxd @@ -0,0 +1,11 @@ +cdef class UVAsync(UVHandle): + cdef: + method_t callback + object ctx + + cdef _init(self, Loop loop, method_t callback, object ctx) + + cdef send(self) + + @staticmethod + cdef UVAsync new(Loop loop, method_t callback, object ctx) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/async_.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/async_.pyx new file mode 100644 index 0000000..5c740cf --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/async_.pyx @@ -0,0 +1,56 @@ +@cython.no_gc_clear +cdef class UVAsync(UVHandle): + cdef _init(self, Loop loop, method_t callback, object ctx): + cdef int err + + self._start_init(loop) + + self._handle = PyMem_RawMalloc(sizeof(uv.uv_async_t)) + if self._handle is NULL: + self._abort_init() + raise MemoryError() + + err = uv.uv_async_init(self._loop.uvloop, + self._handle, + __uvasync_callback) + if err < 0: + self._abort_init() + raise convert_error(err) + + self._finish_init() + + self.callback = callback + self.ctx = ctx + + cdef send(self): + cdef 
int err + + self._ensure_alive() + + err = uv.uv_async_send(self._handle) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + @staticmethod + cdef UVAsync new(Loop loop, method_t callback, object ctx): + cdef UVAsync handle + handle = UVAsync.__new__(UVAsync) + handle._init(loop, callback, ctx) + return handle + + +cdef void __uvasync_callback( + uv.uv_async_t* handle, +) noexcept with gil: + if __ensure_handle_data(handle, "UVAsync callback") == 0: + return + + cdef: + UVAsync async_ = handle.data + method_t cb = async_.callback + try: + cb(async_.ctx) + except BaseException as ex: + async_._error(ex, False) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/basetransport.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/basetransport.pxd new file mode 100644 index 0000000..ba356a7 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/basetransport.pxd @@ -0,0 +1,54 @@ +cdef class UVBaseTransport(UVSocketHandle): + + cdef: + readonly bint _closing + + bint _protocol_connected + bint _protocol_paused + object _protocol_data_received + size_t _high_water + size_t _low_water + + object _protocol + Server _server + object _waiter + + dict _extra_info + + uint32_t _conn_lost + + object __weakref__ + + # All "inline" methods are final + + cdef inline _maybe_pause_protocol(self) + cdef inline _maybe_resume_protocol(self) + + cdef inline _schedule_call_connection_made(self) + cdef inline _schedule_call_connection_lost(self, exc) + + cdef _wakeup_waiter(self) + cdef _call_connection_made(self) + cdef _call_connection_lost(self, exc) + + # Overloads of UVHandle methods: + cdef _fatal_error(self, exc, throw, reason=?) + cdef _close(self) + + cdef inline _set_server(self, Server server) + cdef inline _set_waiter(self, object waiter) + + cdef _set_protocol(self, object protocol) + cdef _clear_protocol(self) + + cdef inline _init_protocol(self) + cdef inline _add_extra_info(self, str name, object obj) + + # === overloads === + + cdef _new_socket(self) + cdef size_t _get_write_buffer_size(self) + + cdef bint _is_reading(self) + cdef _start_reading(self) + cdef _stop_reading(self) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/basetransport.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/basetransport.pyx new file mode 100644 index 0000000..28b3079 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/basetransport.pyx @@ -0,0 +1,293 @@ +cdef class UVBaseTransport(UVSocketHandle): + + def __cinit__(self): + # Flow control + self._high_water = FLOW_CONTROL_HIGH_WATER * 1024 + self._low_water = FLOW_CONTROL_HIGH_WATER // 4 + + self._protocol = None + self._protocol_connected = 0 + self._protocol_paused = 0 + self._protocol_data_received = None + + self._server = None + self._waiter = None + self._extra_info = None + + self._conn_lost = 0 + + self._closing = 0 + + cdef size_t _get_write_buffer_size(self): + return 0 + + cdef inline _schedule_call_connection_made(self): + self._loop._call_soon_handle( + new_MethodHandle(self._loop, + "UVTransport._call_connection_made", + self._call_connection_made, + self.context, + self)) + + cdef inline _schedule_call_connection_lost(self, exc): + self._loop._call_soon_handle( + new_MethodHandle1(self._loop, + "UVTransport._call_connection_lost", + self._call_connection_lost, + self.context, + self, exc)) + + cdef _fatal_error(self, exc, throw, reason=None): + # Overload UVHandle._fatal_error + + self._force_close(exc) + + if not isinstance(exc, 
OSError): + + if throw or self._loop is None: + raise exc + + msg = f'Fatal error on transport {self.__class__.__name__}' + if reason is not None: + msg = f'{msg} ({reason})' + + self._loop.call_exception_handler({ + 'message': msg, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + cdef inline _maybe_pause_protocol(self): + cdef: + size_t size = self._get_write_buffer_size() + + if size <= self._high_water: + return + + if not self._protocol_paused: + self._protocol_paused = 1 + try: + # _maybe_pause_protocol() is always triggered from user-calls, + # so we must copy the context to avoid entering context twice + run_in_context( + self.context.copy(), self._protocol.pause_writing, + ) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.pause_writing() failed', + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + cdef inline _maybe_resume_protocol(self): + cdef: + size_t size = self._get_write_buffer_size() + + if self._protocol_paused and size <= self._low_water: + self._protocol_paused = 0 + try: + # We're copying the context to avoid entering context twice, + # even though it's not always necessary to copy - it's easier + # to copy here than passing down a copied context. + run_in_context( + self.context.copy(), self._protocol.resume_writing, + ) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.resume_writing() failed', + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + cdef _wakeup_waiter(self): + if self._waiter is not None: + if not self._waiter.cancelled(): + if not self._is_alive(): + self._waiter.set_exception( + RuntimeError( + 'closed Transport handle and unset waiter')) + else: + self._waiter.set_result(True) + self._waiter = None + + cdef _call_connection_made(self): + if self._protocol is None: + raise RuntimeError( + 'protocol is not set, cannot call connection_made()') + + # We use `_is_alive()` and not `_closing`, because we call + # `transport._close()` in `loop.create_connection()` if an + # exception happens during `await waiter`. + if not self._is_alive(): + # A connection waiter can be cancelled between + # 'await loop.create_connection()' and + # `_schedule_call_connection_made` and + # the actual `_call_connection_made`. + self._wakeup_waiter() + return + + # Set _protocol_connected to 1 before calling "connection_made": + # if transport is aborted or closed, "connection_lost" will + # still be scheduled. + self._protocol_connected = 1 + + try: + self._protocol.connection_made(self) + except BaseException: + self._wakeup_waiter() + raise + + if not self._is_alive(): + # This might happen when "transport.abort()" is called + # from "Protocol.connection_made". + self._wakeup_waiter() + return + + self._start_reading() + self._wakeup_waiter() + + cdef _call_connection_lost(self, exc): + if self._waiter is not None: + if not self._waiter.done(): + self._waiter.set_exception(exc) + self._waiter = None + + if self._closed: + # The handle is closed -- likely, _call_connection_lost + # was already called before. 
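+            # Illustrative note: this early return makes teardown
+            # idempotent -- once the handle is closed, a second scheduled
+            # _call_connection_lost is a no-op, so
+            # protocol.connection_lost() fires at most once.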
+ return + + try: + if self._protocol_connected: + self._protocol.connection_lost(exc) + finally: + self._clear_protocol() + + self._close() + + server = self._server + if server is not None: + (server)._detach() + self._server = None + + cdef inline _set_server(self, Server server): + self._server = server + (server)._attach() + + cdef inline _set_waiter(self, object waiter): + if waiter is not None and not isfuture(waiter): + raise TypeError( + f'invalid waiter object {waiter!r}, expected asyncio.Future') + + self._waiter = waiter + + cdef _set_protocol(self, object protocol): + self._protocol = protocol + # Store a reference to the bound method directly + try: + self._protocol_data_received = protocol.data_received + except AttributeError: + pass + + cdef _clear_protocol(self): + self._protocol = None + self._protocol_data_received = None + + cdef inline _init_protocol(self): + self._loop._track_transport(self) + if self._protocol is None: + raise RuntimeError('invalid _init_protocol call') + self._schedule_call_connection_made() + + cdef inline _add_extra_info(self, str name, object obj): + if self._extra_info is None: + self._extra_info = {} + self._extra_info[name] = obj + + cdef bint _is_reading(self): + raise NotImplementedError + + cdef _start_reading(self): + raise NotImplementedError + + cdef _stop_reading(self): + raise NotImplementedError + + # === Public API === + + property _paused: + # Used by SSLProto. Might be removed in the future. + def __get__(self): + return bool(not self._is_reading()) + + def get_protocol(self): + return self._protocol + + def set_protocol(self, protocol): + self._set_protocol(protocol) + if self._is_reading(): + self._stop_reading() + self._start_reading() + + def _force_close(self, exc): + # Used by SSLProto. Might be removed in the future. 
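+        # Illustrative note: unlike close() further below, which lets the
+        # write buffer drain before scheduling connection_lost(),
+        # _force_close()/abort() tears the connection down immediately,
+        # without flushing pending writes first.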
+ if self._conn_lost or self._closed: + return + if not self._closing: + self._closing = 1 + self._stop_reading() + self._conn_lost += 1 + self._schedule_call_connection_lost(exc) + + def abort(self): + self._force_close(None) + + def close(self): + if self._closing or self._closed: + return + + self._closing = 1 + self._stop_reading() + + if not self._get_write_buffer_size(): + # The write buffer is empty + self._conn_lost += 1 + self._schedule_call_connection_lost(None) + + def is_closing(self): + return self._closing + + def get_write_buffer_size(self): + return self._get_write_buffer_size() + + def set_write_buffer_limits(self, high=None, low=None): + self._ensure_alive() + + self._high_water, self._low_water = add_flowcontrol_defaults( + high, low, FLOW_CONTROL_HIGH_WATER) + + self._maybe_pause_protocol() + + def get_write_buffer_limits(self): + return (self._low_water, self._high_water) + + def get_extra_info(self, name, default=None): + if self._extra_info is not None and name in self._extra_info: + return self._extra_info[name] + if name == 'socket': + return self._get_socket() + if name == 'sockname': + return self._get_socket().getsockname() + if name == 'peername': + try: + return self._get_socket().getpeername() + except socket_error: + return default + return default diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/check.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/check.pxd new file mode 100644 index 0000000..86cfd8f --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/check.pxd @@ -0,0 +1,14 @@ +cdef class UVCheck(UVHandle): + cdef: + Handle h + bint running + + # All "inline" methods are final + + cdef _init(self, Loop loop, Handle h) + + cdef inline stop(self) + cdef inline start(self) + + @staticmethod + cdef UVCheck new(Loop loop, Handle h) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/check.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/check.pyx new file mode 100644 index 0000000..1a61c4e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/check.pyx @@ -0,0 +1,72 @@ +@cython.no_gc_clear +cdef class UVCheck(UVHandle): + cdef _init(self, Loop loop, Handle h): + cdef int err + + self._start_init(loop) + + self._handle = PyMem_RawMalloc(sizeof(uv.uv_check_t)) + if self._handle is NULL: + self._abort_init() + raise MemoryError() + + err = uv.uv_check_init(self._loop.uvloop, self._handle) + if err < 0: + self._abort_init() + raise convert_error(err) + + self._finish_init() + + self.h = h + self.running = 0 + + cdef inline stop(self): + cdef int err + + if not self._is_alive(): + self.running = 0 + return + + if self.running == 1: + err = uv.uv_check_stop(self._handle) + self.running = 0 + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + cdef inline start(self): + cdef int err + + self._ensure_alive() + + if self.running == 0: + err = uv.uv_check_start(self._handle, + cb_check_callback) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + self.running = 1 + + @staticmethod + cdef UVCheck new(Loop loop, Handle h): + cdef UVCheck handle + handle = UVCheck.__new__(UVCheck) + handle._init(loop, h) + return handle + + +cdef void cb_check_callback( + uv.uv_check_t* handle, +) noexcept with gil: + if __ensure_handle_data(handle, "UVCheck callback") == 0: + return + + cdef: + UVCheck check = handle.data + Handle h = check.h + try: + h._run() + except BaseException as ex: + check._error(ex, False) diff --git 
a/hackaton/lib/python3.12/site-packages/uvloop/handles/fsevent.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/fsevent.pxd new file mode 100644 index 0000000..3a32428 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/fsevent.pxd @@ -0,0 +1,12 @@ +cdef class UVFSEvent(UVHandle): + cdef: + object callback + bint running + + cdef _init(self, Loop loop, object callback, object context) + cdef _close(self) + cdef start(self, char* path, int flags) + cdef stop(self) + + @staticmethod + cdef UVFSEvent new(Loop loop, object callback, object context) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/fsevent.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/fsevent.pyx new file mode 100644 index 0000000..6ed6433 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/fsevent.pyx @@ -0,0 +1,116 @@ +import enum + + +class FileSystemEvent(enum.IntEnum): + RENAME = uv.UV_RENAME + CHANGE = uv.UV_CHANGE + RENAME_CHANGE = RENAME | CHANGE + + +@cython.no_gc_clear +cdef class UVFSEvent(UVHandle): + cdef _init(self, Loop loop, object callback, object context): + cdef int err + + self._start_init(loop) + + self._handle = PyMem_RawMalloc( + sizeof(uv.uv_fs_event_t) + ) + if self._handle is NULL: + self._abort_init() + raise MemoryError() + + err = uv.uv_fs_event_init( + self._loop.uvloop, self._handle + ) + if err < 0: + self._abort_init() + raise convert_error(err) + + self._finish_init() + + self.running = 0 + self.callback = callback + if context is None: + context = Context_CopyCurrent() + self.context = context + + cdef start(self, char* path, int flags): + cdef int err + + self._ensure_alive() + + if self.running == 0: + err = uv.uv_fs_event_start( + self._handle, + __uvfsevent_callback, + path, + flags, + ) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + self.running = 1 + + cdef stop(self): + cdef int err + + if not self._is_alive(): + self.running = 0 + return + + if self.running == 1: + err = uv.uv_fs_event_stop(self._handle) + self.running = 0 + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + cdef _close(self): + try: + self.stop() + finally: + UVHandle._close(self) + + def cancel(self): + self._close() + + def cancelled(self): + return self.running == 0 + + @staticmethod + cdef UVFSEvent new(Loop loop, object callback, object context): + cdef UVFSEvent handle + handle = UVFSEvent.__new__(UVFSEvent) + handle._init(loop, callback, context) + return handle + + +cdef void __uvfsevent_callback( + uv.uv_fs_event_t* handle, + const char *filename, + int events, + int status, +) noexcept with gil: + if __ensure_handle_data( + handle, "UVFSEvent callback" + ) == 0: + return + + cdef: + UVFSEvent fs_event = handle.data + Handle h + + try: + h = new_Handle( + fs_event._loop, + fs_event.callback, + (filename, FileSystemEvent(events)), + fs_event.context, + ) + h._run() + except BaseException as ex: + fs_event._error(ex, False) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/handle.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/handle.pxd new file mode 100644 index 0000000..5af1c14 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/handle.pxd @@ -0,0 +1,48 @@ +cdef class UVHandle: + cdef: + uv.uv_handle_t *_handle + Loop _loop + readonly _source_traceback + bint _closed + bint _inited + object context + + # Added to enable current UDPTransport implementation, + # which doesn't use libuv handles. 
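+        # Illustrative note: when _has_handle is 0, the lifecycle helpers
+        # in handle.pyx (e.g. _finish_init() and the debug checks in
+        # _is_alive()) skip the uv_handle_t-specific bookkeeping behind
+        # their `_has_handle == 1` guards.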
+ bint _has_handle + + # All "inline" methods are final + + cdef inline _start_init(self, Loop loop) + cdef inline _abort_init(self) + cdef inline _finish_init(self) + + cdef inline bint _is_alive(self) + cdef inline _ensure_alive(self) + + cdef _error(self, exc, throw) + cdef _fatal_error(self, exc, throw, reason=?) + + cdef _warn_unclosed(self) + + cdef _free(self) + cdef _close(self) + + +cdef class UVSocketHandle(UVHandle): + cdef: + # Points to a Python file-object that should be closed + # when the transport is closing. Used by pipes. This + # should probably be refactored somehow. + object _fileobj + object __cached_socket + + # All "inline" methods are final + + cdef _fileno(self) + + cdef _new_socket(self) + cdef inline _get_socket(self) + cdef inline _attach_fileobj(self, object file) + + cdef _open(self, int sockfd) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/handle.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/handle.pyx new file mode 100644 index 0000000..2c96458 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/handle.pyx @@ -0,0 +1,395 @@ +cdef class UVHandle: + """A base class for all libuv handles. + + Automatically manages memory deallocation and closing. + + Important: + + 1. call "_ensure_alive()" before calling any libuv functions on + your handles. + + 2. call "__ensure_handle_data" in *all* libuv handle callbacks. + """ + + def __cinit__(self): + self._closed = 0 + self._inited = 0 + self._has_handle = 1 + self._handle = NULL + self._loop = None + self._source_traceback = None + + def __init__(self): + raise TypeError( + '{} is not supposed to be instantiated from Python'.format( + self.__class__.__name__)) + + def __dealloc__(self): + if UVLOOP_DEBUG: + if self._loop is not None: + if self._inited: + self._loop._debug_handles_current.subtract([ + self.__class__.__name__]) + else: + # No "@cython.no_gc_clear" decorator on this UVHandle + raise RuntimeError( + '{} without @no_gc_clear; loop was set to None by GC' + .format(self.__class__.__name__)) + + if self._handle is NULL: + return + + # -> When we're at this point, something is wrong <- + + if self._handle.loop is NULL: + # The handle wasn't initialized with "uv_{handle}_init" + self._closed = 1 + self._free() + raise RuntimeError( + '{} is open in __dealloc__ with loop set to NULL' + .format(self.__class__.__name__)) + + if self._closed: + # So _handle is not NULL and self._closed == 1? + raise RuntimeError( + '{}.__dealloc__: _handle is NULL, _closed == 1'.format( + self.__class__.__name__)) + + # The handle is dealloced while open. Let's try to close it. + # Situations when this is possible include unhandled exceptions, + # errors during Handle.__cinit__/__init__ etc. 
+ if self._inited: + self._handle.data = NULL + uv.uv_close(self._handle, __uv_close_handle_cb) # void; no errors + self._handle = NULL + self._warn_unclosed() + else: + # The handle was allocated, but not initialized + self._closed = 1 + self._free() + + cdef _free(self): + if self._handle == NULL: + return + + if UVLOOP_DEBUG and self._inited: + self._loop._debug_uv_handles_freed += 1 + + PyMem_RawFree(self._handle) + self._handle = NULL + + cdef _warn_unclosed(self): + if self._source_traceback is not None: + try: + tb = ''.join(tb_format_list(self._source_traceback)) + tb = 'object created at (most recent call last):\n{}'.format( + tb.rstrip()) + except Exception as ex: + msg = ( + 'unclosed resource {!r}; could not serialize ' + 'debug traceback: {}: {}' + ).format(self, type(ex).__name__, ex) + else: + msg = 'unclosed resource {!r}; {}'.format(self, tb) + else: + msg = 'unclosed resource {!r}'.format(self) + warnings_warn(msg, ResourceWarning) + + cdef inline _abort_init(self): + if self._handle is not NULL: + self._free() + + try: + if UVLOOP_DEBUG: + name = self.__class__.__name__ + if self._inited: + raise RuntimeError( + '_abort_init: {}._inited is set'.format(name)) + if self._closed: + raise RuntimeError( + '_abort_init: {}._closed is set'.format(name)) + finally: + self._closed = 1 + + cdef inline _finish_init(self): + self._inited = 1 + if self._has_handle == 1: + self._handle.data = self + if self._loop._debug: + self._source_traceback = extract_stack() + if UVLOOP_DEBUG: + cls_name = self.__class__.__name__ + self._loop._debug_uv_handles_total += 1 + self._loop._debug_handles_total.update([cls_name]) + self._loop._debug_handles_current.update([cls_name]) + + cdef inline _start_init(self, Loop loop): + if UVLOOP_DEBUG: + if self._loop is not None: + raise RuntimeError( + '{}._start_init can only be called once'.format( + self.__class__.__name__)) + + self._loop = loop + + cdef inline bint _is_alive(self): + cdef bint res + res = self._closed != 1 and self._inited == 1 + if UVLOOP_DEBUG: + if res and self._has_handle == 1: + name = self.__class__.__name__ + if self._handle is NULL: + raise RuntimeError( + '{} is alive, but _handle is NULL'.format(name)) + if self._loop is None: + raise RuntimeError( + '{} is alive, but _loop is None'.format(name)) + if self._handle.loop is not self._loop.uvloop: + raise RuntimeError( + '{} is alive, but _handle.loop is not ' + 'initialized'.format(name)) + if self._handle.data is not self: + raise RuntimeError( + '{} is alive, but _handle.data is not ' + 'initialized'.format(name)) + return res + + cdef inline _ensure_alive(self): + if not self._is_alive(): + raise RuntimeError( + 'unable to perform operation on {!r}; ' + 'the handler is closed'.format(self)) + + cdef _fatal_error(self, exc, throw, reason=None): + # Fatal error means an error that was returned by the + # underlying libuv handle function. We usually can't + # recover from that, hence we just close the handle. + self._close() + + if throw or self._loop is None: + raise exc + else: + self._loop._handle_exception(exc) + + cdef _error(self, exc, throw): + # A non-fatal error is usually an error that was caught + # by the handler, but was originated in the client code + # (not in libuv). In this case we either want to simply + # raise or log it. 
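# [editor's note] A hedged usage sketch, not part of uvloop: _warn_unclosed()
# above can only print an "object created at" traceback when the loop was in
# debug mode while _finish_init() ran (that is when _source_traceback is
# captured). From application code the relevant switches are:
import warnings

import uvloop

warnings.simplefilter('always', ResourceWarning)  # make the warnings visible
loop = uvloop.new_event_loop()
loop.set_debug(True)  # new handles now record creation tracebacks
loop.close()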
+ if throw or self._loop is None: + raise exc + else: + self._loop._handle_exception(exc) + + cdef _close(self): + if self._closed == 1: + return + + self._closed = 1 + + if self._handle is NULL: + return + + if UVLOOP_DEBUG: + if self._handle.data is NULL: + raise RuntimeError( + '{}._close: _handle.data is NULL'.format( + self.__class__.__name__)) + + if self._handle.data is not self: + raise RuntimeError( + '{}._close: _handle.data is not UVHandle/self'.format( + self.__class__.__name__)) + + if uv.uv_is_closing(self._handle): + raise RuntimeError( + '{}._close: uv_is_closing() is true'.format( + self.__class__.__name__)) + + # We want the handle wrapper (UVHandle) to stay alive until + # the closing callback fires. + Py_INCREF(self) + uv.uv_close(self._handle, __uv_close_handle_cb) # void; no errors + + def __repr__(self): + return '<{} closed={} {:#x}>'.format( + self.__class__.__name__, + self._closed, + id(self)) + + +cdef class UVSocketHandle(UVHandle): + + def __cinit__(self): + self._fileobj = None + self.__cached_socket = None + + cdef _fileno(self): + cdef: + int fd + int err + + self._ensure_alive() + err = uv.uv_fileno(self._handle, &fd) + if err < 0: + raise convert_error(err) + + return fd + + cdef _new_socket(self): + raise NotImplementedError + + cdef inline _get_socket(self): + if self.__cached_socket is not None: + return self.__cached_socket + + if not self._is_alive(): + return None + + self.__cached_socket = self._new_socket() + if UVLOOP_DEBUG: + # We don't "dup" for the "__cached_socket". + assert self.__cached_socket.fileno() == self._fileno() + return self.__cached_socket + + cdef inline _attach_fileobj(self, object file): + # When we create a TCP/PIPE/etc connection/server based on + # a Python file object, we need to close the file object when + # the uv handle is closed. + socket_inc_io_ref(file) + self._fileobj = file + + cdef _close(self): + if self.__cached_socket is not None: + (self.__cached_socket)._fd = -1 + + UVHandle._close(self) + + try: + # This code will only run for transports created from + # Python sockets, i.e. with `loop.create_server(sock=sock)` etc. + if self._fileobj is not None: + if isinstance(self._fileobj, socket_socket): + # Detaching the socket object is the ideal solution: + # * libuv will actually close the FD; + # * detach() call will reset FD for the Python socket + # object, which means that it won't be closed 2nd time + # when the socket object is GCed. + # + # No need to call `socket_dec_io_ref()`, as + # `socket.detach()` ignores `socket._io_refs`. + self._fileobj.detach() + else: + try: + # `socket.close()` will raise an EBADF because libuv + # has already closed the underlying FD. 
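# [editor's note] Illustrative sketch of why _close() below prefers
# socket.detach(): once libuv owns (and will close) the FD, the Python
# socket object must not close it a second time. detach() hands ownership
# back and makes a later close() a harmless no-op:
import os
import socket

a, b = socket.socketpair()
fd = b.detach()     # b.fileno() is now -1; b no longer owns fd
os.close(fd)        # the one and only close of the descriptor
b.close()           # no-op, no EBADF
a.close()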
+ self._fileobj.close() + except OSError as ex: + if ex.errno != errno_EBADF: + raise + except Exception as ex: + self._loop.call_exception_handler({ + 'exception': ex, + 'transport': self, + 'message': f'could not close attached file object ' + f'{self._fileobj!r}', + }) + finally: + self._fileobj = None + + cdef _open(self, int sockfd): + raise NotImplementedError + + +cdef inline bint __ensure_handle_data(uv.uv_handle_t* handle, + const char* handle_ctx): + + cdef Loop loop + + if UVLOOP_DEBUG: + if handle.loop is NULL: + raise RuntimeError( + 'handle.loop is NULL in __ensure_handle_data') + + if handle.loop.data is NULL: + raise RuntimeError( + 'handle.loop.data is NULL in __ensure_handle_data') + + if handle.data is NULL: + loop = handle.loop.data + loop.call_exception_handler({ + 'message': '{} called with handle.data == NULL'.format( + handle_ctx.decode('latin-1')) + }) + return 0 + + if handle.data is NULL: + # The underlying UVHandle object was GCed with an open uv_handle_t. + loop = handle.loop.data + loop.call_exception_handler({ + 'message': '{} called after destroying the UVHandle'.format( + handle_ctx.decode('latin-1')) + }) + return 0 + + return 1 + + +cdef void __uv_close_handle_cb(uv.uv_handle_t* handle) noexcept with gil: + cdef UVHandle h + + if handle.data is NULL: + # The original UVHandle is long dead. Just free the mem of + # the uv_handle_t* handler. + + if UVLOOP_DEBUG: + if handle.loop == NULL or handle.loop.data == NULL: + raise RuntimeError( + '__uv_close_handle_cb: handle.loop is invalid') + (handle.loop.data)._debug_uv_handles_freed += 1 + + PyMem_RawFree(handle) + else: + h = handle.data + try: + if UVLOOP_DEBUG: + if not h._has_handle: + raise RuntimeError( + 'has_handle=0 in __uv_close_handle_cb') + h._loop._debug_handles_closed.update([ + h.__class__.__name__]) + h._free() + finally: + Py_DECREF(h) # Was INCREFed in UVHandle._close + + +cdef void __close_all_handles(Loop loop) noexcept: + uv.uv_walk(loop.uvloop, + __uv_walk_close_all_handles_cb, + loop) # void + + +cdef void __uv_walk_close_all_handles_cb( + uv.uv_handle_t* handle, + void* arg, +) noexcept with gil: + + cdef: + Loop loop = arg + UVHandle h + + if uv.uv_is_closing(handle): + # The handle is closed or is closing. + return + + if handle.data is NULL: + # This shouldn't happen. Ever. 
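# [editor's note] Illustrative only: __ensure_handle_data() above and the
# walk callback below both report anomalies through the loop's exception
# handler instead of raising. Applications hook those reports like this:
import uvloop

def on_loop_error(loop, context):
    # context['message'] carries strings such as
    # "<ctx> called with handle.data == NULL"
    print('loop reported:', context.get('message'))

loop = uvloop.new_event_loop()
loop.set_exception_handler(on_loop_error)
loop.call_exception_handler({'message': 'demo report'})  # -> handler runs
loop.close()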
+ loop.call_exception_handler({ + 'message': 'handle.data is NULL in __close_all_handles_cb' + }) + return + + h = handle.data + if not h._closed: + h._warn_unclosed() + h._close() diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/idle.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/idle.pxd new file mode 100644 index 0000000..cf7b19f --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/idle.pxd @@ -0,0 +1,14 @@ +cdef class UVIdle(UVHandle): + cdef: + Handle h + bint running + + # All "inline" methods are final + + cdef _init(self, Loop loop, Handle h) + + cdef inline stop(self) + cdef inline start(self) + + @staticmethod + cdef UVIdle new(Loop loop, Handle h) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/idle.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/idle.pyx new file mode 100644 index 0000000..91c641f --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/idle.pyx @@ -0,0 +1,72 @@ +@cython.no_gc_clear +cdef class UVIdle(UVHandle): + cdef _init(self, Loop loop, Handle h): + cdef int err + + self._start_init(loop) + + self._handle = PyMem_RawMalloc(sizeof(uv.uv_idle_t)) + if self._handle is NULL: + self._abort_init() + raise MemoryError() + + err = uv.uv_idle_init(self._loop.uvloop, self._handle) + if err < 0: + self._abort_init() + raise convert_error(err) + + self._finish_init() + + self.h = h + self.running = 0 + + cdef inline stop(self): + cdef int err + + if not self._is_alive(): + self.running = 0 + return + + if self.running == 1: + err = uv.uv_idle_stop(self._handle) + self.running = 0 + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + cdef inline start(self): + cdef int err + + self._ensure_alive() + + if self.running == 0: + err = uv.uv_idle_start(self._handle, + cb_idle_callback) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + self.running = 1 + + @staticmethod + cdef UVIdle new(Loop loop, Handle h): + cdef UVIdle handle + handle = UVIdle.__new__(UVIdle) + handle._init(loop, h) + return handle + + +cdef void cb_idle_callback( + uv.uv_idle_t* handle, +) noexcept with gil: + if __ensure_handle_data(handle, "UVIdle callback") == 0: + return + + cdef: + UVIdle idle = handle.data + Handle h = idle.h + try: + h._run() + except BaseException as ex: + idle._error(ex, False) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/pipe.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/pipe.pxd new file mode 100644 index 0000000..56fc265 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/pipe.pxd @@ -0,0 +1,33 @@ +cdef class UnixServer(UVStreamServer): + + cdef bind(self, str path) + + @staticmethod + cdef UnixServer new(Loop loop, object protocol_factory, Server server, + object backlog, + object ssl, + object ssl_handshake_timeout, + object ssl_shutdown_timeout) + + +cdef class UnixTransport(UVStream): + + @staticmethod + cdef UnixTransport new(Loop loop, object protocol, Server server, + object waiter, object context) + + cdef connect(self, char* addr) + + +cdef class ReadUnixTransport(UVStream): + + @staticmethod + cdef ReadUnixTransport new(Loop loop, object protocol, Server server, + object waiter) + + +cdef class WriteUnixTransport(UVStream): + + @staticmethod + cdef WriteUnixTransport new(Loop loop, object protocol, Server server, + object waiter) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/pipe.pyx 
b/hackaton/lib/python3.12/site-packages/uvloop/handles/pipe.pyx new file mode 100644 index 0000000..4b95ed6 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/pipe.pyx @@ -0,0 +1,247 @@ +cdef __pipe_init_uv_handle(UVStream handle, Loop loop): + cdef int err + + handle._handle = PyMem_RawMalloc(sizeof(uv.uv_pipe_t)) + if handle._handle is NULL: + handle._abort_init() + raise MemoryError() + + # Initialize pipe handle with ipc=0. + # ipc=1 means that libuv will use recvmsg/sendmsg + # instead of recv/send. + err = uv.uv_pipe_init(handle._loop.uvloop, + handle._handle, + 0) + # UV_HANDLE_READABLE allows calling uv_read_start() on this pipe + # even if it is O_WRONLY, see also #317, libuv/libuv#2058 + handle._handle.flags |= uv.UV_INTERNAL_HANDLE_READABLE + if err < 0: + handle._abort_init() + raise convert_error(err) + + handle._finish_init() + + +cdef __pipe_open(UVStream handle, int fd): + cdef int err + err = uv.uv_pipe_open(handle._handle, + fd) + if err < 0: + exc = convert_error(err) + raise exc + + +cdef __pipe_get_socket(UVSocketHandle handle): + fileno = handle._fileno() + return PseudoSocket(uv.AF_UNIX, uv.SOCK_STREAM, 0, fileno) + + +@cython.no_gc_clear +cdef class UnixServer(UVStreamServer): + + @staticmethod + cdef UnixServer new(Loop loop, object protocol_factory, Server server, + object backlog, + object ssl, + object ssl_handshake_timeout, + object ssl_shutdown_timeout): + + cdef UnixServer handle + handle = UnixServer.__new__(UnixServer) + handle._init(loop, protocol_factory, server, backlog, + ssl, ssl_handshake_timeout, ssl_shutdown_timeout) + __pipe_init_uv_handle(handle, loop) + return handle + + cdef _new_socket(self): + return __pipe_get_socket(self) + + cdef _open(self, int sockfd): + self._ensure_alive() + __pipe_open(self, sockfd) + self._mark_as_open() + + cdef bind(self, str path): + cdef int err + self._ensure_alive() + err = uv.uv_pipe_bind(self._handle, + path.encode()) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + self._mark_as_open() + + cdef UVStream _make_new_transport(self, object protocol, object waiter, + object context): + cdef UnixTransport tr + tr = UnixTransport.new(self._loop, protocol, self._server, waiter, + context) + return tr + + cdef _close(self): + sock = self._fileobj + if sock is not None and sock in self._loop._unix_server_sockets: + path = sock.getsockname() + else: + path = None + + UVStreamServer._close(self) + + if path is not None: + prev_ino = self._loop._unix_server_sockets[sock] + del self._loop._unix_server_sockets[sock] + try: + if os_stat(path).st_ino == prev_ino: + os_unlink(path) + except FileNotFoundError: + pass + except OSError as err: + aio_logger.error('Unable to clean up listening UNIX socket ' + '%r: %r', path, err) + + +@cython.no_gc_clear +cdef class UnixTransport(UVStream): + + @staticmethod + cdef UnixTransport new(Loop loop, object protocol, Server server, + object waiter, object context): + + cdef UnixTransport handle + handle = UnixTransport.__new__(UnixTransport) + handle._init(loop, protocol, server, waiter, context) + __pipe_init_uv_handle(handle, loop) + return handle + + cdef _new_socket(self): + return __pipe_get_socket(self) + + cdef _open(self, int sockfd): + __pipe_open(self, sockfd) + + cdef connect(self, char* addr): + cdef _PipeConnectRequest req + req = _PipeConnectRequest(self._loop, self) + req.connect(addr) + + +@cython.no_gc_clear +cdef class ReadUnixTransport(UVStream): + + @staticmethod + cdef ReadUnixTransport new(Loop loop, object 
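# [editor's note] A hedged end-to-end sketch (the /tmp path is made up):
# UnixServer above backs loop.create_unix_server(), and its _close() unlinks
# the socket file when the inode still matches the one it bound.
import asyncio
import os

import uvloop

async def main():
    path = '/tmp/uvloop-pipe-demo.sock'

    async def echo(reader, writer):
        writer.write(await reader.readline())
        await writer.drain()
        writer.close()

    server = await asyncio.start_unix_server(echo, path=path)
    reader, writer = await asyncio.open_unix_connection(path)
    writer.write(b'ping\n')
    print(await reader.readline())  # b'ping\n'
    writer.close()
    server.close()
    await server.wait_closed()
    print(os.path.exists(path))  # False: see UnixServer._close() above

asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
asyncio.run(main())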
protocol, Server server, + object waiter): + cdef ReadUnixTransport handle + handle = ReadUnixTransport.__new__(ReadUnixTransport) + # This is only used in connect_read_pipe() and subprocess_shell/exec() + # directly, we could simply copy the current context. + handle._init(loop, protocol, server, waiter, Context_CopyCurrent()) + __pipe_init_uv_handle(handle, loop) + return handle + + cdef _new_socket(self): + return __pipe_get_socket(self) + + cdef _open(self, int sockfd): + __pipe_open(self, sockfd) + + def get_write_buffer_limits(self): + raise NotImplementedError + + def set_write_buffer_limits(self, high=None, low=None): + raise NotImplementedError + + def get_write_buffer_size(self): + raise NotImplementedError + + def write(self, data): + raise NotImplementedError + + def writelines(self, list_of_data): + raise NotImplementedError + + def write_eof(self): + raise NotImplementedError + + def can_write_eof(self): + raise NotImplementedError + + def abort(self): + raise NotImplementedError + + +@cython.no_gc_clear +cdef class WriteUnixTransport(UVStream): + + @staticmethod + cdef WriteUnixTransport new(Loop loop, object protocol, Server server, + object waiter): + cdef WriteUnixTransport handle + handle = WriteUnixTransport.__new__(WriteUnixTransport) + + # We listen for read events on write-end of the pipe. When + # the read-end is close, the uv_stream_t.read callback will + # receive an error -- we want to silence that error, and just + # close the transport. + handle._close_on_read_error() + + # This is only used in connect_write_pipe() and subprocess_shell/exec() + # directly, we could simply copy the current context. + handle._init(loop, protocol, server, waiter, Context_CopyCurrent()) + __pipe_init_uv_handle(handle, loop) + return handle + + cdef _new_socket(self): + return __pipe_get_socket(self) + + cdef _open(self, int sockfd): + __pipe_open(self, sockfd) + + def pause_reading(self): + raise NotImplementedError + + def resume_reading(self): + raise NotImplementedError + + +cdef class _PipeConnectRequest(UVRequest): + cdef: + UnixTransport transport + uv.uv_connect_t _req_data + + def __cinit__(self, loop, transport): + self.request = &self._req_data + self.request.data = self + self.transport = transport + + cdef connect(self, char* addr): + # uv_pipe_connect returns void + uv.uv_pipe_connect(self.request, + self.transport._handle, + addr, + __pipe_connect_callback) + +cdef void __pipe_connect_callback( + uv.uv_connect_t* req, + int status, +) noexcept with gil: + cdef: + _PipeConnectRequest wrapper + UnixTransport transport + + wrapper = <_PipeConnectRequest> req.data + transport = wrapper.transport + + if status < 0: + exc = convert_error(status) + else: + exc = None + + try: + transport._on_connect(exc) + except BaseException as ex: + wrapper.transport._fatal_error(ex, False) + finally: + wrapper.on_done() diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/poll.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/poll.pxd new file mode 100644 index 0000000..c220540 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/poll.pxd @@ -0,0 +1,25 @@ +cdef class UVPoll(UVHandle): + cdef: + int fd + Handle reading_handle + Handle writing_handle + + cdef _init(self, Loop loop, int fd) + cdef _close(self) + + cdef inline _poll_start(self, int flags) + cdef inline _poll_stop(self) + + cdef int is_active(self) noexcept + + cdef is_reading(self) + cdef is_writing(self) + + cdef start_reading(self, Handle callback) + cdef start_writing(self, Handle 
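# [editor's note] Sketch of the public API served by the ReadUnixTransport /
# WriteUnixTransport hunks above (note how the read side stubs out every
# write method with NotImplementedError, and the write side the read
# methods):
import asyncio
import os

async def main():
    loop = asyncio.get_running_loop()
    rfd, wfd = os.pipe()

    class Collector(asyncio.Protocol):
        def __init__(self):
            self.buf = bytearray()

        def data_received(self, data):
            self.buf += data

    r_tr, r_proto = await loop.connect_read_pipe(
        Collector, os.fdopen(rfd, 'rb', buffering=0))
    w_tr, _ = await loop.connect_write_pipe(
        asyncio.Protocol, os.fdopen(wfd, 'wb', buffering=0))

    w_tr.write(b'hello')   # fine on the write transport
    # r_tr.write(b'...')   # would raise NotImplementedError
    await asyncio.sleep(0.1)
    print(bytes(r_proto.buf))  # b'hello'
    w_tr.close()
    r_tr.close()

asyncio.run(main())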
callback) + cdef stop_reading(self) + cdef stop_writing(self) + cdef stop(self) + + @staticmethod + cdef UVPoll new(Loop loop, int fd) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/poll.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/poll.pyx new file mode 100644 index 0000000..c905e9b --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/poll.pyx @@ -0,0 +1,233 @@ +@cython.no_gc_clear +cdef class UVPoll(UVHandle): + cdef _init(self, Loop loop, int fd): + cdef int err + + self._start_init(loop) + + self._handle = PyMem_RawMalloc(sizeof(uv.uv_poll_t)) + if self._handle is NULL: + self._abort_init() + raise MemoryError() + + err = uv.uv_poll_init(self._loop.uvloop, + self._handle, fd) + if err < 0: + self._abort_init() + raise convert_error(err) + + self._finish_init() + + self.fd = fd + self.reading_handle = None + self.writing_handle = None + + @staticmethod + cdef UVPoll new(Loop loop, int fd): + cdef UVPoll handle + handle = UVPoll.__new__(UVPoll) + handle._init(loop, fd) + return handle + + cdef int is_active(self) noexcept: + return (self.reading_handle is not None or + self.writing_handle is not None) + + cdef inline _poll_start(self, int flags): + cdef int err + + self._ensure_alive() + + err = uv.uv_poll_start( + self._handle, + flags, + __on_uvpoll_event) + + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + cdef inline _poll_stop(self): + cdef int err + + if not self._is_alive(): + return + + err = uv.uv_poll_stop(self._handle) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + cdef: + int backend_id + system.epoll_event dummy_event + + if system.PLATFORM_IS_LINUX: + # libuv doesn't remove the FD from epoll immediately + # after uv_poll_stop or uv_poll_close, causing hard + # to debug issue with dup-ed file descriptors causing + # CPU burn in epoll/epoll_ctl: + # https://github.com/MagicStack/uvloop/issues/61 + # + # It's safe though to manually call epoll_ctl here, + # after calling uv_poll_stop. + + backend_id = uv.uv_backend_fd(self._loop.uvloop) + if backend_id != -1: + memset(&dummy_event, 0, sizeof(dummy_event)) + system.epoll_ctl( + backend_id, + system.EPOLL_CTL_DEL, + self.fd, + &dummy_event) # ignore errors + + cdef is_reading(self): + return self._is_alive() and self.reading_handle is not None + + cdef is_writing(self): + return self._is_alive() and self.writing_handle is not None + + cdef start_reading(self, Handle callback): + cdef: + int mask = 0 + + if self.reading_handle is None: + # not reading right now, setup the handle + + mask = uv.UV_READABLE + if self.writing_handle is not None: + # are we writing right now? + mask |= uv.UV_WRITABLE + + self._poll_start(mask) + else: + self.reading_handle._cancel() + + self.reading_handle = callback + + cdef start_writing(self, Handle callback): + cdef: + int mask = 0 + + if self.writing_handle is None: + # not writing right now, setup the handle + + mask = uv.UV_WRITABLE + if self.reading_handle is not None: + # are we reading right now? 
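# [editor's note] Illustrative sketch: UVPoll above is what uvloop allocates
# per descriptor for loop.add_reader()/add_writer(); a single uv_poll_t
# serves both directions by OR-ing UV_READABLE | UV_WRITABLE, exactly as the
# mask logic in start_reading()/start_writing() shows. From the public API:
import asyncio
import socket

async def main():
    loop = asyncio.get_running_loop()
    a, b = socket.socketpair()
    got = loop.create_future()

    loop.add_reader(b.fileno(), lambda: got.set_result(b.recv(16)))
    a.send(b'x')
    print(await got)                       # b'x'
    print(loop.remove_reader(b.fileno()))  # True: a reader was removed
    print(loop.remove_reader(b.fileno()))  # False: stop_reading is a no-op
    a.close()
    b.close()

asyncio.run(main())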
+ mask |= uv.UV_READABLE + + self._poll_start(mask) + else: + self.writing_handle._cancel() + + self.writing_handle = callback + + cdef stop_reading(self): + if self.reading_handle is None: + return False + + self.reading_handle._cancel() + self.reading_handle = None + + if self.writing_handle is None: + self.stop() + else: + self._poll_start(uv.UV_WRITABLE) + + return True + + cdef stop_writing(self): + if self.writing_handle is None: + return False + + self.writing_handle._cancel() + self.writing_handle = None + + if self.reading_handle is None: + self.stop() + else: + self._poll_start(uv.UV_READABLE) + + return True + + cdef stop(self): + if self.reading_handle is not None: + self.reading_handle._cancel() + self.reading_handle = None + + if self.writing_handle is not None: + self.writing_handle._cancel() + self.writing_handle = None + + self._poll_stop() + + cdef _close(self): + if self.is_active(): + self.stop() + + UVHandle._close(self) + + cdef _fatal_error(self, exc, throw, reason=None): + try: + if self.reading_handle is not None: + try: + self.reading_handle._run() + except BaseException as ex: + self._loop._handle_exception(ex) + self.reading_handle = None + + if self.writing_handle is not None: + try: + self.writing_handle._run() + except BaseException as ex: + self._loop._handle_exception(ex) + self.writing_handle = None + + finally: + self._close() + + +cdef void __on_uvpoll_event( + uv.uv_poll_t* handle, + int status, + int events, +) noexcept with gil: + + if __ensure_handle_data(handle, "UVPoll callback") == 0: + return + + cdef: + UVPoll poll = handle.data + + if status < 0: + exc = convert_error(status) + poll._fatal_error(exc, False) + return + + if ((events & (uv.UV_READABLE | uv.UV_DISCONNECT)) and + poll.reading_handle is not None): + + try: + if UVLOOP_DEBUG: + poll._loop._poll_read_events_total += 1 + poll.reading_handle._run() + except BaseException as ex: + if UVLOOP_DEBUG: + poll._loop._poll_read_cb_errors_total += 1 + poll._error(ex, False) + # continue code execution + + if ((events & (uv.UV_WRITABLE | uv.UV_DISCONNECT)) and + poll.writing_handle is not None): + + try: + if UVLOOP_DEBUG: + poll._loop._poll_write_events_total += 1 + poll.writing_handle._run() + except BaseException as ex: + if UVLOOP_DEBUG: + poll._loop._poll_write_cb_errors_total += 1 + poll._error(ex, False) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/process.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/process.pxd new file mode 100644 index 0000000..970abcf --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/process.pxd @@ -0,0 +1,80 @@ +cdef class UVProcess(UVHandle): + cdef: + object _returncode + object _pid + + object _errpipe_read + object _errpipe_write + object _preexec_fn + bint _restore_signals + + list _fds_to_close + + # Attributes used to compose uv_process_options_t: + uv.uv_process_options_t options + uv.uv_stdio_container_t[3] iocnt + list __env + char **uv_opt_env + list __args + char **uv_opt_args + char *uv_opt_file + bytes __cwd + + cdef _close_process_handle(self) + + cdef _init(self, Loop loop, list args, dict env, cwd, + start_new_session, + _stdin, _stdout, _stderr, pass_fds, + debug_flags, preexec_fn, restore_signals) + + cdef _after_fork(self) + + cdef char** __to_cstring_array(self, list arr) + cdef _init_args(self, list args) + cdef _init_env(self, dict env) + cdef _init_files(self, _stdin, _stdout, _stderr) + cdef _init_options(self, list args, dict env, cwd, start_new_session, + _stdin, _stdout, _stderr, 
bint force_fork) + + cdef _close_after_spawn(self, int fd) + + cdef _on_exit(self, int64_t exit_status, int term_signal) + cdef _kill(self, int signum) + + +cdef class UVProcessTransport(UVProcess): + cdef: + list _exit_waiters + list _init_futs + bint _stdio_ready + list _pending_calls + object _protocol + bint _finished + + WriteUnixTransport _stdin + ReadUnixTransport _stdout + ReadUnixTransport _stderr + + object stdin_proto + object stdout_proto + object stderr_proto + + cdef _file_redirect_stdio(self, int fd) + cdef _file_devnull(self) + cdef _file_inpipe(self) + cdef _file_outpipe(self) + + cdef _check_proc(self) + cdef _pipe_connection_lost(self, int fd, exc) + cdef _pipe_data_received(self, int fd, data) + + cdef _call_connection_made(self, waiter) + cdef _try_finish(self) + + @staticmethod + cdef UVProcessTransport new(Loop loop, protocol, args, env, cwd, + start_new_session, + _stdin, _stdout, _stderr, pass_fds, + waiter, + debug_flags, + preexec_fn, restore_signals) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/process.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/process.pyx new file mode 100644 index 0000000..63b982a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/process.pyx @@ -0,0 +1,792 @@ +@cython.no_gc_clear +cdef class UVProcess(UVHandle): + """Abstract class; wrapper over uv_process_t handle.""" + + def __cinit__(self): + self.uv_opt_env = NULL + self.uv_opt_args = NULL + self._returncode = None + self._pid = None + self._fds_to_close = list() + self._preexec_fn = None + self._restore_signals = True + self.context = Context_CopyCurrent() + + cdef _close_process_handle(self): + # XXX: This is a workaround for a libuv bug: + # - https://github.com/libuv/libuv/issues/1933 + # - https://github.com/libuv/libuv/pull/551 + if self._handle is NULL: + return + self._handle.data = NULL + uv.uv_close(self._handle, __uv_close_process_handle_cb) + self._handle = NULL # close callback will free() the memory + + cdef _init(self, Loop loop, list args, dict env, + cwd, start_new_session, + _stdin, _stdout, _stderr, # std* can be defined as macros in C + pass_fds, debug_flags, preexec_fn, restore_signals): + + global __forking + global __forking_loop + global __forkHandler + + cdef int err + + self._start_init(loop) + + self._handle = PyMem_RawMalloc( + sizeof(uv.uv_process_t)) + if self._handle is NULL: + self._abort_init() + raise MemoryError() + + # Too early to call _finish_init, but still a lot of work to do. + # Let's set handle.data to NULL, so in case something goes wrong, + # callbacks have a chance to avoid casting *something* into UVHandle. + self._handle.data = NULL + + force_fork = False + if system.PLATFORM_IS_APPLE and not ( + preexec_fn is None + and not pass_fds + ): + # see _execute_child() in CPython/subprocess.py + force_fork = True + + try: + self._init_options(args, env, cwd, start_new_session, + _stdin, _stdout, _stderr, force_fork) + + restore_inheritable = set() + if pass_fds: + for fd in pass_fds: + if not os_get_inheritable(fd): + restore_inheritable.add(fd) + os_set_inheritable(fd, True) + except Exception: + self._abort_init() + raise + + if __forking or loop.active_process_handler is not None: + # Our pthread_atfork handlers won't work correctly when + # another loop is forking in another thread (even though + # GIL should help us to avoid that.) 
+ self._abort_init() + raise RuntimeError( + 'Racing with another loop to spawn a process.') + + self._errpipe_read, self._errpipe_write = os_pipe() + fds_to_close = self._fds_to_close + self._fds_to_close = None + fds_to_close.append(self._errpipe_read) + # add the write pipe last so we can close it early + fds_to_close.append(self._errpipe_write) + try: + os_set_inheritable(self._errpipe_write, True) + + self._preexec_fn = preexec_fn + self._restore_signals = restore_signals + + loop.active_process_handler = self + __forking = 1 + __forking_loop = loop + system.setForkHandler(&__get_fork_handler) + + PyOS_BeforeFork() + + err = uv.uv_spawn(loop.uvloop, + self._handle, + &self.options) + + __forking = 0 + __forking_loop = None + system.resetForkHandler() + loop.active_process_handler = None + + PyOS_AfterFork_Parent() + + if err < 0: + self._close_process_handle() + self._abort_init() + raise convert_error(err) + + self._finish_init() + + # close the write pipe early + os_close(fds_to_close.pop()) + + if preexec_fn is not None: + errpipe_data = bytearray() + while True: + # XXX: This is a blocking code that has to be + # rewritten (using loop.connect_read_pipe() or + # otherwise.) + part = os_read(self._errpipe_read, 50000) + errpipe_data += part + if not part or len(errpipe_data) > 50000: + break + + finally: + while fds_to_close: + os_close(fds_to_close.pop()) + + for fd in restore_inheritable: + os_set_inheritable(fd, False) + + # asyncio caches the PID in BaseSubprocessTransport, + # so that the transport knows what the PID was even + # after the process is finished. + self._pid = (self._handle).pid + + # Track the process handle (create a strong ref to it) + # to guarantee that __dealloc__ doesn't happen in an + # uncontrolled fashion. We want to wait until the process + # exits and libuv calls __uvprocess_on_exit_callback, + # which will call `UVProcess._close()`, which will, in turn, + # untrack this handle. + self._loop._track_process(self) + + if debug_flags & __PROCESS_DEBUG_SLEEP_AFTER_FORK: + time_sleep(1) + + if preexec_fn is not None and errpipe_data: + # preexec_fn has raised an exception. The child + # process must be dead now. 
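# [editor's note] A hedged sketch of how the errpipe handshake above (child
# writes b"ExcName:message", parent rebuilds the exception and chains it
# onto a SubprocessError, as parsed just below) surfaces to user code;
# preexec_fn reaches uvloop through the subprocess_exec path in this file:
import asyncio
import subprocess

import uvloop

def bad_preexec():
    raise RuntimeError('refusing to exec')

async def main():
    try:
        await asyncio.create_subprocess_exec('true', preexec_fn=bad_preexec)
    except subprocess.SubprocessError as exc:
        print(exc)            # Exception occurred in preexec_fn.
        print(exc.__cause__)  # refusing to exec

asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
asyncio.run(main())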
+ try: + exc_name, exc_msg = errpipe_data.split(b':', 1) + exc_name = exc_name.decode() + exc_msg = exc_msg.decode() + except Exception: + self._close() + raise subprocess_SubprocessError( + 'Bad exception data from child: {!r}'.format( + errpipe_data)) + exc_cls = getattr(__builtins__, exc_name, + subprocess_SubprocessError) + + exc = subprocess_SubprocessError( + 'Exception occurred in preexec_fn.') + exc.__cause__ = exc_cls(exc_msg) + self._close() + raise exc + + cdef _after_fork(self): + # See CPython/_posixsubprocess.c for details + cdef int err + + if self._restore_signals: + _Py_RestoreSignals() + + PyOS_AfterFork_Child() + + err = uv.uv_loop_fork(self._loop.uvloop) + if err < 0: + raise convert_error(err) + + if self._preexec_fn is not None: + try: + gc_disable() + self._preexec_fn() + except BaseException as ex: + try: + with open(self._errpipe_write, 'wb') as f: + f.write(str(ex.__class__.__name__).encode()) + f.write(b':') + f.write(str(ex.args[0]).encode()) + finally: + system._exit(255) + return + else: + os_close(self._errpipe_write) + else: + os_close(self._errpipe_write) + + cdef _close_after_spawn(self, int fd): + if self._fds_to_close is None: + raise RuntimeError( + 'UVProcess._close_after_spawn called after uv_spawn') + self._fds_to_close.append(fd) + + def __dealloc__(self): + if self.uv_opt_env is not NULL: + PyMem_RawFree(self.uv_opt_env) + self.uv_opt_env = NULL + + if self.uv_opt_args is not NULL: + PyMem_RawFree(self.uv_opt_args) + self.uv_opt_args = NULL + + cdef char** __to_cstring_array(self, list arr): + cdef: + int i + ssize_t arr_len = len(arr) + bytes el + + char **ret + + ret = PyMem_RawMalloc((arr_len + 1) * sizeof(char *)) + if ret is NULL: + raise MemoryError() + + for i in range(arr_len): + el = arr[i] + # NB: PyBytes_AsString doesn't copy the data; + # we have to be careful when the "arr" is GCed, + # and it shouldn't be ever mutated. + ret[i] = PyBytes_AsString(el) + + ret[arr_len] = NULL + return ret + + cdef _init_options(self, list args, dict env, cwd, start_new_session, + _stdin, _stdout, _stderr, bint force_fork): + + memset(&self.options, 0, sizeof(uv.uv_process_options_t)) + + self._init_env(env) + self.options.env = self.uv_opt_env + + self._init_args(args) + self.options.file = self.uv_opt_file + self.options.args = self.uv_opt_args + + if start_new_session: + self.options.flags |= uv.UV_PROCESS_DETACHED + + if force_fork: + # This is a hack to work around the change in libuv 1.44: + # > macos: use posix_spawn instead of fork + # where Python subprocess options like preexec_fn are + # crippled. CPython only uses posix_spawn under a pretty + # strict list of conditions (see subprocess.py), and falls + # back to using fork() otherwise. We'd like to simulate such + # behavior with libuv, but unfortunately libuv doesn't + # provide explicit API to choose such implementation detail. + # Based on current (libuv 1.46) behavior, setting + # UV_PROCESS_SETUID or UV_PROCESS_SETGID would reliably make + # libuv fallback to use fork, so let's just use it for now. 
+ self.options.flags |= uv.UV_PROCESS_SETUID + self.options.uid = uv.getuid() + + if cwd is not None: + cwd = os_fspath(cwd) + + if isinstance(cwd, str): + cwd = PyUnicode_EncodeFSDefault(cwd) + if not isinstance(cwd, bytes): + raise ValueError('cwd must be a str or bytes object') + + self.__cwd = cwd + self.options.cwd = PyBytes_AsString(self.__cwd) + + self.options.exit_cb = &__uvprocess_on_exit_callback + + self._init_files(_stdin, _stdout, _stderr) + + cdef _init_args(self, list args): + cdef: + bytes path + int an = len(args) + + if an < 1: + raise ValueError('cannot spawn a process: args are empty') + + self.__args = args.copy() + for i in range(an): + arg = os_fspath(args[i]) + if isinstance(arg, str): + self.__args[i] = PyUnicode_EncodeFSDefault(arg) + elif not isinstance(arg, bytes): + raise TypeError('all args must be str or bytes') + + path = self.__args[0] + self.uv_opt_file = PyBytes_AsString(path) + self.uv_opt_args = self.__to_cstring_array(self.__args) + + cdef _init_env(self, dict env): + if env is not None: + self.__env = list() + for key in env: + val = env[key] + + if isinstance(key, str): + key = PyUnicode_EncodeFSDefault(key) + elif not isinstance(key, bytes): + raise TypeError( + 'all environment vars must be bytes or str') + + if isinstance(val, str): + val = PyUnicode_EncodeFSDefault(val) + elif not isinstance(val, bytes): + raise TypeError( + 'all environment values must be bytes or str') + + self.__env.append(key + b'=' + val) + + self.uv_opt_env = self.__to_cstring_array(self.__env) + else: + self.__env = None + + cdef _init_files(self, _stdin, _stdout, _stderr): + self.options.stdio_count = 0 + + cdef _kill(self, int signum): + cdef int err + self._ensure_alive() + err = uv.uv_process_kill(self._handle, signum) + if err < 0: + raise convert_error(err) + + cdef _on_exit(self, int64_t exit_status, int term_signal): + if term_signal: + # From Python docs: + # A negative value -N indicates that the child was + # terminated by signal N (POSIX only). 
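# [editor's note] Quick demonstration of the sign convention restated in the
# comment above: a child terminated by signal N reports returncode == -N.
import asyncio
import signal

async def main():
    proc = await asyncio.create_subprocess_exec('sleep', '30')
    proc.terminate()              # SIGTERM, delivered via uv_process_kill
    rc = await proc.wait()
    print(rc == -signal.SIGTERM)  # True (-15 on most platforms)

asyncio.run(main())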
+ self._returncode = -term_signal + else: + self._returncode = exit_status + + self._close() + + cdef _close(self): + try: + if self._loop is not None: + self._loop._untrack_process(self) + finally: + UVHandle._close(self) + + +DEF _CALL_PIPE_DATA_RECEIVED = 0 +DEF _CALL_PIPE_CONNECTION_LOST = 1 +DEF _CALL_PROCESS_EXITED = 2 +DEF _CALL_CONNECTION_LOST = 3 + + +@cython.no_gc_clear +cdef class UVProcessTransport(UVProcess): + def __cinit__(self): + self._exit_waiters = [] + self._protocol = None + + self._init_futs = [] + self._pending_calls = [] + self._stdio_ready = 0 + + self._stdin = self._stdout = self._stderr = None + self.stdin_proto = self.stdout_proto = self.stderr_proto = None + + self._finished = 0 + + cdef _on_exit(self, int64_t exit_status, int term_signal): + UVProcess._on_exit(self, exit_status, term_signal) + + if self._stdio_ready: + self._loop.call_soon(self._protocol.process_exited, + context=self.context) + else: + self._pending_calls.append((_CALL_PROCESS_EXITED, None, None)) + + self._try_finish() + + for waiter in self._exit_waiters: + if not waiter.cancelled(): + waiter.set_result(self._returncode) + self._exit_waiters.clear() + + self._close() + + cdef _check_proc(self): + if not self._is_alive() or self._returncode is not None: + raise ProcessLookupError() + + cdef _pipe_connection_lost(self, int fd, exc): + if self._stdio_ready: + self._loop.call_soon(self._protocol.pipe_connection_lost, fd, exc, + context=self.context) + self._try_finish() + else: + self._pending_calls.append((_CALL_PIPE_CONNECTION_LOST, fd, exc)) + + cdef _pipe_data_received(self, int fd, data): + if self._stdio_ready: + self._loop.call_soon(self._protocol.pipe_data_received, fd, data, + context=self.context) + else: + self._pending_calls.append((_CALL_PIPE_DATA_RECEIVED, fd, data)) + + cdef _file_redirect_stdio(self, int fd): + fd = os_dup(fd) + os_set_inheritable(fd, True) + self._close_after_spawn(fd) + return fd + + cdef _file_devnull(self): + dn = os_open(os_devnull, os_O_RDWR) + os_set_inheritable(dn, True) + self._close_after_spawn(dn) + return dn + + cdef _file_outpipe(self): + r, w = __socketpair() + os_set_inheritable(w, True) + self._close_after_spawn(w) + return r, w + + cdef _file_inpipe(self): + r, w = __socketpair() + os_set_inheritable(r, True) + self._close_after_spawn(r) + return r, w + + cdef _init_files(self, _stdin, _stdout, _stderr): + cdef uv.uv_stdio_container_t *iocnt + + UVProcess._init_files(self, _stdin, _stdout, _stderr) + + io = [None, None, None] + + self.options.stdio_count = 3 + self.options.stdio = self.iocnt + + if _stdin is not None: + if _stdin == subprocess_PIPE: + r, w = self._file_inpipe() + io[0] = r + + self.stdin_proto = WriteSubprocessPipeProto(self, 0) + waiter = self._loop._new_future() + self._stdin = WriteUnixTransport.new( + self._loop, self.stdin_proto, None, waiter) + self._init_futs.append(waiter) + self._stdin._open(w) + self._stdin._init_protocol() + elif _stdin == subprocess_DEVNULL: + io[0] = self._file_devnull() + elif _stdout == subprocess_STDOUT: + raise ValueError( + 'subprocess.STDOUT is supported only by stderr parameter') + else: + io[0] = self._file_redirect_stdio(_stdin) + else: + io[0] = self._file_redirect_stdio(0) + + if _stdout is not None: + if _stdout == subprocess_PIPE: + # We can't use UV_CREATE_PIPE here, since 'stderr' might be + # set to 'subprocess.STDOUT', and there is no way to + # emulate that functionality with libuv high-level + # streams API. Therefore, we create pipes for stdout and + # stderr manually. 
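# [editor's note] Sketch of the user-visible behaviour the manual pipe setup
# below enables: stderr=subprocess.STDOUT is emulated by dup-ing the stdout
# pipe's write end into the stderr slot (the subprocess_STDOUT branch).
import asyncio
import subprocess

async def main():
    proc = await asyncio.create_subprocess_exec(
        'sh', '-c', 'echo out; echo err 1>&2',
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # merged into the stdout pipe
    )
    out, err = await proc.communicate()
    print(err)                  # None: no separate stderr pipe
    print(sorted(out.split()))  # [b'err', b'out']

asyncio.run(main())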
+ + r, w = self._file_outpipe() + io[1] = w + + self.stdout_proto = ReadSubprocessPipeProto(self, 1) + waiter = self._loop._new_future() + self._stdout = ReadUnixTransport.new( + self._loop, self.stdout_proto, None, waiter) + self._init_futs.append(waiter) + self._stdout._open(r) + self._stdout._init_protocol() + elif _stdout == subprocess_DEVNULL: + io[1] = self._file_devnull() + elif _stdout == subprocess_STDOUT: + raise ValueError( + 'subprocess.STDOUT is supported only by stderr parameter') + else: + io[1] = self._file_redirect_stdio(_stdout) + else: + io[1] = self._file_redirect_stdio(1) + + if _stderr is not None: + if _stderr == subprocess_PIPE: + r, w = self._file_outpipe() + io[2] = w + + self.stderr_proto = ReadSubprocessPipeProto(self, 2) + waiter = self._loop._new_future() + self._stderr = ReadUnixTransport.new( + self._loop, self.stderr_proto, None, waiter) + self._init_futs.append(waiter) + self._stderr._open(r) + self._stderr._init_protocol() + elif _stderr == subprocess_STDOUT: + if io[1] is None: + # shouldn't ever happen + raise RuntimeError('cannot apply subprocess.STDOUT') + + io[2] = self._file_redirect_stdio(io[1]) + elif _stderr == subprocess_DEVNULL: + io[2] = self._file_devnull() + else: + io[2] = self._file_redirect_stdio(_stderr) + else: + io[2] = self._file_redirect_stdio(2) + + assert len(io) == 3 + for idx in range(3): + iocnt = &self.iocnt[idx] + if io[idx] is not None: + iocnt.flags = uv.UV_INHERIT_FD + iocnt.data.fd = io[idx] + else: + iocnt.flags = uv.UV_IGNORE + + cdef _call_connection_made(self, waiter): + try: + # we're always called in the right context, so just call the user's + self._protocol.connection_made(self) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + if waiter is not None and not waiter.cancelled(): + waiter.set_exception(ex) + else: + raise + else: + if waiter is not None and not waiter.cancelled(): + waiter.set_result(True) + + self._stdio_ready = 1 + if self._pending_calls: + pending_calls = self._pending_calls.copy() + self._pending_calls.clear() + for (type, fd, arg) in pending_calls: + if type == _CALL_PIPE_CONNECTION_LOST: + self._pipe_connection_lost(fd, arg) + elif type == _CALL_PIPE_DATA_RECEIVED: + self._pipe_data_received(fd, arg) + elif type == _CALL_PROCESS_EXITED: + self._loop.call_soon(self._protocol.process_exited) + elif type == _CALL_CONNECTION_LOST: + self._loop.call_soon(self._protocol.connection_lost, None) + + cdef _try_finish(self): + if self._returncode is None or self._finished: + return + + if ((self.stdin_proto is None or self.stdin_proto.disconnected) and + (self.stdout_proto is None or + self.stdout_proto.disconnected) and + (self.stderr_proto is None or + self.stderr_proto.disconnected)): + + self._finished = 1 + + if self._stdio_ready: + # copy self.context for simplicity + self._loop.call_soon(self._protocol.connection_lost, None, + context=self.context) + else: + self._pending_calls.append((_CALL_CONNECTION_LOST, None, None)) + + def __stdio_inited(self, waiter, stdio_fut): + exc = stdio_fut.exception() + if exc is not None: + if waiter is None: + raise exc + else: + waiter.set_exception(exc) + else: + self._loop._call_soon_handle( + new_MethodHandle1(self._loop, + "UVProcessTransport._call_connection_made", + self._call_connection_made, + None, # means to copy the current context + self, waiter)) + + @staticmethod + cdef UVProcessTransport new(Loop loop, protocol, args, env, + cwd, start_new_session, + _stdin, _stdout, _stderr, pass_fds, + waiter, + debug_flags, + 
preexec_fn, + restore_signals): + + cdef UVProcessTransport handle + handle = UVProcessTransport.__new__(UVProcessTransport) + handle._protocol = protocol + handle._init(loop, args, env, cwd, start_new_session, + __process_convert_fileno(_stdin), + __process_convert_fileno(_stdout), + __process_convert_fileno(_stderr), + pass_fds, + debug_flags, + preexec_fn, + restore_signals) + + if handle._init_futs: + handle._stdio_ready = 0 + init_fut = aio_gather(*handle._init_futs) + # add_done_callback will copy the current context and run the + # callback within the context + init_fut.add_done_callback( + ft_partial(handle.__stdio_inited, waiter)) + else: + handle._stdio_ready = 1 + loop._call_soon_handle( + new_MethodHandle1(loop, + "UVProcessTransport._call_connection_made", + handle._call_connection_made, + None, # means to copy the current context + handle, waiter)) + + return handle + + def get_protocol(self): + return self._protocol + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_pid(self): + return self._pid + + def get_returncode(self): + return self._returncode + + def get_pipe_transport(self, fd): + if fd == 0: + return self._stdin + elif fd == 1: + return self._stdout + elif fd == 2: + return self._stderr + + def terminate(self): + self._check_proc() + self._kill(uv.SIGTERM) + + def kill(self): + self._check_proc() + self._kill(uv.SIGKILL) + + def send_signal(self, int signal): + self._check_proc() + self._kill(signal) + + def is_closing(self): + return self._closed + + def close(self): + if self._returncode is None: + self._kill(uv.SIGKILL) + + if self._stdin is not None: + self._stdin.close() + if self._stdout is not None: + self._stdout.close() + if self._stderr is not None: + self._stderr.close() + + if self._returncode is not None: + # The process is dead, just close the UV handle. + # + # (If "self._returncode is None", the process should have been + # killed already and we're just waiting for a SIGCHLD; after + # which the transport will be GC'ed and the uvhandle will be + # closed in UVHandle.__dealloc__.) 
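# [editor's note] A minimal driver for the transport methods defined below
# (get_pid, get_returncode, get_pipe_transport); modelled on the asyncio
# docs' SubprocessProtocol example, so completion is signalled from
# process_exited:
import asyncio
import subprocess

class OutputCatcher(asyncio.SubprocessProtocol):
    def __init__(self, done):
        self.done = done
        self.chunks = []

    def pipe_data_received(self, fd, data):
        self.chunks.append(data)  # fd 1 == stdout, fd 2 == stderr

    def process_exited(self):
        self.done.set_result(b''.join(self.chunks))

async def main():
    loop = asyncio.get_running_loop()
    done = loop.create_future()
    transport, _ = await loop.subprocess_exec(
        lambda: OutputCatcher(done), 'echo', 'hi',
        stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    print('pid:', transport.get_pid())
    print(await done)             # b'hi\n'
    transport.close()

asyncio.run(main())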
+ self._close() + + def get_extra_info(self, name, default=None): + return default + + def _wait(self): + fut = self._loop._new_future() + if self._returncode is not None: + fut.set_result(self._returncode) + return fut + + self._exit_waiters.append(fut) + return fut + + +class WriteSubprocessPipeProto(aio_BaseProtocol): + + def __init__(self, proc, fd): + if UVLOOP_DEBUG: + if type(proc) is not UVProcessTransport: + raise TypeError + if not isinstance(fd, int): + raise TypeError + self.proc = proc + self.fd = fd + self.pipe = None + self.disconnected = False + + def connection_made(self, transport): + self.pipe = transport + + def __repr__(self): + return ('<%s fd=%s pipe=%r>' + % (self.__class__.__name__, self.fd, self.pipe)) + + def connection_lost(self, exc): + self.disconnected = True + (self.proc)._pipe_connection_lost(self.fd, exc) + self.proc = None + + def pause_writing(self): + (self.proc)._protocol.pause_writing() + + def resume_writing(self): + (self.proc)._protocol.resume_writing() + + +class ReadSubprocessPipeProto(WriteSubprocessPipeProto, + aio_Protocol): + + def data_received(self, data): + (self.proc)._pipe_data_received(self.fd, data) + + +cdef __process_convert_fileno(object obj): + if obj is None or isinstance(obj, int): + return obj + + fileno = obj.fileno() + if not isinstance(fileno, int): + raise TypeError( + '{!r}.fileno() returned non-integer'.format(obj)) + return fileno + + +cdef void __uvprocess_on_exit_callback( + uv.uv_process_t *handle, + int64_t exit_status, + int term_signal, +) noexcept with gil: + + if __ensure_handle_data(handle, + "UVProcess exit callback") == 0: + return + + cdef UVProcess proc = handle.data + try: + proc._on_exit(exit_status, term_signal) + except BaseException as ex: + proc._error(ex, False) + + +cdef __socketpair(): + cdef: + int fds[2] + int err + + err = system.socketpair(uv.AF_UNIX, uv.SOCK_STREAM, 0, fds) + if err: + exc = convert_error(-err) + raise exc + + os_set_inheritable(fds[0], False) + os_set_inheritable(fds[1], False) + + return fds[0], fds[1] + + +cdef void __uv_close_process_handle_cb( + uv.uv_handle_t* handle +) noexcept with gil: + PyMem_RawFree(handle) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/stream.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/stream.pxd new file mode 100644 index 0000000..8ca8743 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/stream.pxd @@ -0,0 +1,50 @@ +cdef class UVStream(UVBaseTransport): + cdef: + uv.uv_shutdown_t _shutdown_req + bint __shutting_down + bint __reading + bint __read_error_close + + bint __buffered + object _protocol_get_buffer + object _protocol_buffer_updated + + bint _eof + list _buffer + size_t _buffer_size + + Py_buffer _read_pybuf + bint _read_pybuf_acquired + + # All "inline" methods are final + + cdef inline _init(self, Loop loop, object protocol, Server server, + object waiter, object context) + + + cdef inline _shutdown(self) + cdef inline _accept(self, UVStream server) + + cdef inline _close_on_read_error(self) + + cdef inline __reading_started(self) + cdef inline __reading_stopped(self) + + # The user API write() and writelines() firstly call _buffer_write() to + # buffer up user data chunks, potentially multiple times in writelines(), + # and then call _initiate_write() to start writing either immediately or in + # the next iteration (loop._queue_write()). 
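# [editor's note] Sketch of the protocol side of the write path declared in
# the comment above: write()/writelines() only buffer; once the buffered
# size crosses _high_water the protocol's pause_writing() is called, and
# resume_writing() once libuv drains back below _low_water.
import asyncio

class FlowControlled(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        self.paused = False
        # tiny limits (hypothetical numbers) make pausing easy to observe
        transport.set_write_buffer_limits(high=16 * 1024, low=4 * 1024)

    def pause_writing(self):
        self.paused = True    # stop producing until resume_writing()

    def resume_writing(self):
        self.paused = False   # safe to call transport.write() again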
+ cdef inline _buffer_write(self, object data) + cdef inline _initiate_write(self) + + # _exec_write() is the method that does the actual send, and _try_write() + # is a fast-path used in _exec_write() to send a single chunk. + cdef inline _exec_write(self) + cdef inline _try_write(self, object data) + + cdef _close(self) + + cdef inline _on_accept(self) + cdef inline _on_eof(self) + cdef inline _on_write(self) + cdef inline _on_connect(self, object exc) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/stream.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/stream.pyx new file mode 100644 index 0000000..9fbc5a5 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/stream.pyx @@ -0,0 +1,1019 @@ +cdef extern from *: + ''' + enum {__PREALLOCED_BUFS = 4}; + ''' + const bint __PREALLOCED_BUFS + + +@cython.no_gc_clear +@cython.freelist(DEFAULT_FREELIST_SIZE) +cdef class _StreamWriteContext: + # used to hold additional write request information for uv_write + + cdef: + uv.uv_write_t req + + list buffers + + uv.uv_buf_t uv_bufs_sml[__PREALLOCED_BUFS] + Py_buffer py_bufs_sml[__PREALLOCED_BUFS] + bint py_bufs_sml_inuse + + uv.uv_buf_t* uv_bufs + Py_buffer* py_bufs + size_t py_bufs_len + + uv.uv_buf_t* uv_bufs_start + size_t uv_bufs_len + + UVStream stream + + bint closed + + cdef free_bufs(self): + cdef size_t i + + if self.uv_bufs is not NULL: + PyMem_RawFree(self.uv_bufs) + self.uv_bufs = NULL + if UVLOOP_DEBUG: + if self.py_bufs_sml_inuse: + raise RuntimeError( + '_StreamWriteContext.close: uv_bufs != NULL and ' + 'py_bufs_sml_inuse is True') + + if self.py_bufs is not NULL: + for i from 0 <= i < self.py_bufs_len: + PyBuffer_Release(&self.py_bufs[i]) + PyMem_RawFree(self.py_bufs) + self.py_bufs = NULL + if UVLOOP_DEBUG: + if self.py_bufs_sml_inuse: + raise RuntimeError( + '_StreamWriteContext.close: py_bufs != NULL and ' + 'py_bufs_sml_inuse is True') + + if self.py_bufs_sml_inuse: + for i from 0 <= i < self.py_bufs_len: + PyBuffer_Release(&self.py_bufs_sml[i]) + self.py_bufs_sml_inuse = 0 + + self.py_bufs_len = 0 + self.buffers = None + + cdef close(self): + if self.closed: + return + self.closed = 1 + self.free_bufs() + Py_DECREF(self) + + cdef advance_uv_buf(self, size_t sent): + # Advance the pointer to first uv_buf and the + # pointer to first byte in that buffer. + # + # We do this after a "uv_try_write" call, which + # sometimes sends only a portion of data. + # We then call "advance_uv_buf" on the write + # context, and reuse it in a "uv_write" call. 
+ + cdef: + uv.uv_buf_t* buf + size_t idx + + for idx from 0 <= idx < self.uv_bufs_len: + buf = &self.uv_bufs_start[idx] + if buf.len > sent: + buf.len -= sent + buf.base = buf.base + sent + self.uv_bufs_start = buf + self.uv_bufs_len -= idx + return + else: + sent -= self.uv_bufs_start[idx].len + + if UVLOOP_DEBUG: + if sent < 0: + raise RuntimeError('fatal: sent < 0 in advance_uv_buf') + + raise RuntimeError('fatal: Could not advance _StreamWriteContext') + + @staticmethod + cdef _StreamWriteContext new(UVStream stream, list buffers): + cdef: + _StreamWriteContext ctx + int uv_bufs_idx = 0 + size_t py_bufs_len = 0 + int i + + Py_buffer* p_pybufs + uv.uv_buf_t* p_uvbufs + + ctx = _StreamWriteContext.__new__(_StreamWriteContext) + ctx.stream = None + ctx.closed = 1 + ctx.py_bufs_len = 0 + ctx.py_bufs_sml_inuse = 0 + ctx.uv_bufs = NULL + ctx.py_bufs = NULL + ctx.buffers = buffers + ctx.stream = stream + + if len(buffers) <= __PREALLOCED_BUFS: + # We've got a small number of buffers to write, don't + # need to use malloc. + ctx.py_bufs_sml_inuse = 1 + p_pybufs = &ctx.py_bufs_sml + p_uvbufs = &ctx.uv_bufs_sml + + else: + for buf in buffers: + if UVLOOP_DEBUG: + if not isinstance(buf, (bytes, bytearray, memoryview)): + raise RuntimeError( + 'invalid data in writebuf: an instance of ' + 'bytes, bytearray or memoryview was expected, ' + 'got {}'.format(type(buf))) + + if not PyBytes_CheckExact(buf): + py_bufs_len += 1 + + if py_bufs_len > 0: + ctx.py_bufs = PyMem_RawMalloc( + py_bufs_len * sizeof(Py_buffer)) + if ctx.py_bufs is NULL: + raise MemoryError() + + ctx.uv_bufs = PyMem_RawMalloc( + len(buffers) * sizeof(uv.uv_buf_t)) + if ctx.uv_bufs is NULL: + raise MemoryError() + + p_pybufs = ctx.py_bufs + p_uvbufs = ctx.uv_bufs + + py_bufs_len = 0 + for buf in buffers: + if PyBytes_CheckExact(buf): + # We can only use this hack for bytes since it's + # immutable. For everything else it is only safe to + # use buffer protocol. + p_uvbufs[uv_bufs_idx].base = PyBytes_AS_STRING(buf) + p_uvbufs[uv_bufs_idx].len = Py_SIZE(buf) + + else: + try: + PyObject_GetBuffer( + buf, &p_pybufs[py_bufs_len], PyBUF_SIMPLE) + except Exception: + # This shouldn't ever happen, as `UVStream._buffer_write` + # casts non-bytes objects to `memoryviews`. + ctx.py_bufs_len = py_bufs_len + ctx.free_bufs() + raise + + p_uvbufs[uv_bufs_idx].base = p_pybufs[py_bufs_len].buf + p_uvbufs[uv_bufs_idx].len = p_pybufs[py_bufs_len].len + + py_bufs_len += 1 + + uv_bufs_idx += 1 + + ctx.uv_bufs_start = p_uvbufs + ctx.uv_bufs_len = uv_bufs_idx + + ctx.py_bufs_len = py_bufs_len + ctx.req.data = ctx + + if UVLOOP_DEBUG: + stream._loop._debug_stream_write_ctx_total += 1 + stream._loop._debug_stream_write_ctx_cnt += 1 + + # Do incref after everything else is done. + # Under no circumstances we want `ctx` to be GCed while + # libuv is still working with `ctx.uv_bufs`. 
+ Py_INCREF(ctx) + ctx.closed = 0 + return ctx + + def __dealloc__(self): + if not self.closed: + # Because we do an INCREF in _StreamWriteContext.new, + # __dealloc__ shouldn't ever happen with `self.closed == 1` + raise RuntimeError( + 'open _StreamWriteContext is being deallocated') + + if UVLOOP_DEBUG: + if self.stream is not None: + self.stream._loop._debug_stream_write_ctx_cnt -= 1 + self.stream = None + + +@cython.no_gc_clear +cdef class UVStream(UVBaseTransport): + + def __cinit__(self): + self.__shutting_down = 0 + self.__reading = 0 + self.__read_error_close = 0 + self.__buffered = 0 + self._eof = 0 + self._buffer = [] + self._buffer_size = 0 + + self._protocol_get_buffer = None + self._protocol_buffer_updated = None + + self._read_pybuf_acquired = False + + cdef _set_protocol(self, object protocol): + if protocol is None: + raise TypeError('protocol is required') + + UVBaseTransport._set_protocol(self, protocol) + + if (hasattr(protocol, 'get_buffer') and + not isinstance(protocol, aio_Protocol)): + try: + self._protocol_get_buffer = protocol.get_buffer + self._protocol_buffer_updated = protocol.buffer_updated + self.__buffered = 1 + except AttributeError: + pass + else: + self.__buffered = 0 + + cdef _clear_protocol(self): + UVBaseTransport._clear_protocol(self) + self._protocol_get_buffer = None + self._protocol_buffer_updated = None + self.__buffered = 0 + + cdef inline _shutdown(self): + cdef int err + + if self.__shutting_down: + return + self.__shutting_down = 1 + + self._ensure_alive() + + self._shutdown_req.data = self + err = uv.uv_shutdown(&self._shutdown_req, + self._handle, + __uv_stream_on_shutdown) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + cdef inline _accept(self, UVStream server): + cdef int err + self._ensure_alive() + + err = uv.uv_accept(server._handle, + self._handle) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + self._on_accept() + + cdef inline _close_on_read_error(self): + self.__read_error_close = 1 + + cdef bint _is_reading(self): + return self.__reading + + cdef _start_reading(self): + cdef int err + + if self._closing: + return + + self._ensure_alive() + + if self.__reading: + return + + if self.__buffered: + err = uv.uv_read_start(self._handle, + __uv_stream_buffered_alloc, + __uv_stream_buffered_on_read) + else: + err = uv.uv_read_start(self._handle, + __loop_alloc_buffer, + __uv_stream_on_read) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + else: + # UVStream must live until the read callback is called + self.__reading_started() + + cdef inline __reading_started(self): + if self.__reading: + return + self.__reading = 1 + Py_INCREF(self) + + cdef inline __reading_stopped(self): + if not self.__reading: + return + self.__reading = 0 + Py_DECREF(self) + + cdef _stop_reading(self): + cdef int err + + if not self.__reading: + return + + self._ensure_alive() + + # From libuv docs: + # This function is idempotent and may be safely + # called on a stopped stream. 
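+        # __reading_stopped() below drops the Py_INCREF taken in
+        # __reading_started(), so the handle can be collected again once
+        # libuv no longer needs it for read callbacks.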
+ err = uv.uv_read_stop(self._handle) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + else: + self.__reading_stopped() + + cdef inline _try_write(self, object data): + cdef: + ssize_t written + bint used_buf = 0 + Py_buffer py_buf + void* buf + size_t blen + int saved_errno + int fd + + if (self._handle).write_queue_size != 0: + raise RuntimeError( + 'UVStream._try_write called with data in uv buffers') + + if PyBytes_CheckExact(data): + # We can only use this hack for bytes since it's + # immutable. For everything else it is only safe to + # use buffer protocol. + buf = PyBytes_AS_STRING(data) + blen = Py_SIZE(data) + else: + PyObject_GetBuffer(data, &py_buf, PyBUF_SIMPLE) + used_buf = 1 + buf = py_buf.buf + blen = py_buf.len + + if blen == 0: + # Empty data, do nothing. + return 0 + + fd = self._fileno() + # Use `unistd.h/write` directly, it's faster than + # uv_try_write -- less layers of code. The error + # checking logic is copied from libuv. + written = system.write(fd, buf, blen) + while written == -1 and ( + errno.errno == errno.EINTR or + (system.PLATFORM_IS_APPLE and + errno.errno == errno.EPROTOTYPE)): + # From libuv code (unix/stream.c): + # Due to a possible kernel bug at least in OS X 10.10 "Yosemite", + # EPROTOTYPE can be returned while trying to write to a socket + # that is shutting down. If we retry the write, we should get + # the expected EPIPE instead. + written = system.write(fd, buf, blen) + saved_errno = errno.errno + + if used_buf: + PyBuffer_Release(&py_buf) + + if written < 0: + if saved_errno == errno.EAGAIN or \ + saved_errno == system.EWOULDBLOCK: + return -1 + else: + exc = convert_error(-saved_errno) + self._fatal_error(exc, True) + return + + if UVLOOP_DEBUG: + self._loop._debug_stream_write_tries += 1 + + if written == blen: + return 0 + + return written + + cdef inline _buffer_write(self, object data): + cdef int dlen + + if not PyBytes_CheckExact(data): + data = memoryview(data).cast('b') + + dlen = len(data) + if not dlen: + return + + self._buffer_size += dlen + self._buffer.append(data) + + cdef inline _initiate_write(self): + if (not self._protocol_paused and + (self._handle).write_queue_size == 0 and + self._buffer_size > self._high_water): + # Fast-path. If: + # - the protocol isn't yet paused, + # - there is no data in libuv buffers for this stream, + # - the protocol will be paused if we continue to buffer data + # + # Then: + # - Try to write all buffered data right now. + all_sent = self._exec_write() + if UVLOOP_DEBUG: + if self._buffer_size != 0 or self._buffer != []: + raise RuntimeError( + '_buffer_size is not 0 after a successful _exec_write') + + # There is no need to call `_queue_write` anymore, + # as `uv_write` should be called already. + + if not all_sent: + # If not all of the data was sent successfully, + # we might need to pause the protocol. + self._maybe_pause_protocol() + + elif self._buffer_size > 0: + self._maybe_pause_protocol() + self._loop._queue_write(self) + + cdef inline _exec_write(self): + cdef: + int err + int buf_len + _StreamWriteContext ctx = None + + if self._closed: + # If the handle is closed, just return, it's too + # late to do anything. + return + + buf_len = len(self._buffer) + if not buf_len: + return + + if (self._handle).write_queue_size == 0: + # libuv internal write buffers for this stream are empty. + if buf_len == 1: + # If we only have one piece of data to send, let's + # use our fast implementation of try_write. 
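+                # _try_write's return contract: 0 means all data was
+                # sent, a positive int is the number of bytes actually
+                # written (partial write), -1 means EAGAIN/EWOULDBLOCK,
+                # and None means _fatal_error() has already been called.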
+ data = self._buffer[0] + sent = self._try_write(data) + + if sent is None: + # A `self._fatal_error` was called. + # It might not raise an exception under some + # conditions. + self._buffer_size = 0 + self._buffer.clear() + if not self._closing: + # This should never happen. + raise RuntimeError( + 'stream is open after UVStream._try_write ' + 'returned None') + return + + if sent == 0: + # All data was successfully written. + self._buffer_size = 0 + self._buffer.clear() + # on_write will call "maybe_resume_protocol". + self._on_write() + return True + + if sent > 0: + if UVLOOP_DEBUG: + if sent == len(data): + raise RuntimeError( + '_try_write sent all data and returned ' + 'non-zero') + + if PyBytes_CheckExact(data): + # Cast bytes to memoryview to avoid copying + # data that wasn't sent. + data = memoryview(data) + data = data[sent:] + + self._buffer_size -= sent + self._buffer[0] = data + + # At this point it's either data was sent partially, + # or an EAGAIN has happened. + + else: + ctx = _StreamWriteContext.new(self, self._buffer) + + err = uv.uv_try_write(self._handle, + ctx.uv_bufs_start, + ctx.uv_bufs_len) + + if err > 0: + # Some data was successfully sent. + + if err == self._buffer_size: + # Everything was sent. + ctx.close() + self._buffer.clear() + self._buffer_size = 0 + # on_write will call "maybe_resume_protocol". + self._on_write() + return True + + try: + # Advance pointers to uv_bufs in `ctx`, + # we will reuse it soon for a uv_write + # call. + ctx.advance_uv_buf(err) + except Exception as ex: # This should never happen. + # Let's try to close the `ctx` anyways. + ctx.close() + self._fatal_error(ex, True) + self._buffer.clear() + self._buffer_size = 0 + return + + elif err != uv.UV_EAGAIN: + ctx.close() + exc = convert_error(err) + self._fatal_error(exc, True) + self._buffer.clear() + self._buffer_size = 0 + return + + # fall through + + if ctx is None: + ctx = _StreamWriteContext.new(self, self._buffer) + + err = uv.uv_write(&ctx.req, + self._handle, + ctx.uv_bufs_start, + ctx.uv_bufs_len, + __uv_stream_on_write) + + self._buffer_size = 0 + # Can't use `_buffer.clear()` here: `ctx` holds a reference to + # the `_buffer`. + self._buffer = [] + + if err < 0: + # close write context + ctx.close() + + exc = convert_error(err) + self._fatal_error(exc, True) + return + + self._maybe_resume_protocol() + + cdef size_t _get_write_buffer_size(self): + if self._handle is NULL: + return 0 + return ((self._handle).write_queue_size + + self._buffer_size) + + cdef _close(self): + try: + if self._read_pybuf_acquired: + # Should never happen. libuv always calls uv_alloc/uv_read + # in pairs. + self._loop.call_exception_handler({ + 'transport': self, + 'message': 'XXX: an allocated buffer in transport._close()' + }) + self._read_pybuf_acquired = 0 + PyBuffer_Release(&self._read_pybuf) + + self._stop_reading() + finally: + UVSocketHandle._close(self) + + cdef inline _on_accept(self): + # Ultimately called by __uv_stream_on_listen. + self._init_protocol() + + cdef inline _on_eof(self): + # Any exception raised here will be caught in + # __uv_stream_on_read. + + try: + meth = self._protocol.eof_received + except AttributeError: + keep_open = False + else: + keep_open = run_in_context(self.context, meth) + + if keep_open: + # We're keeping the connection open so the + # protocol can write more, but we still can't + # receive more, so remove the reader callback. 
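+            # This mirrors asyncio's Protocol.eof_received() contract:
+            # a truthy return value means "keep the transport open".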
+ self._stop_reading() + else: + self.close() + + cdef inline _on_write(self): + self._maybe_resume_protocol() + if not self._get_write_buffer_size(): + if self._closing: + self._schedule_call_connection_lost(None) + elif self._eof: + self._shutdown() + + cdef inline _init(self, Loop loop, object protocol, Server server, + object waiter, object context): + self.context = context + self._set_protocol(protocol) + self._start_init(loop) + + if server is not None: + self._set_server(server) + + if waiter is not None: + self._set_waiter(waiter) + + cdef inline _on_connect(self, object exc): + # Called from __tcp_connect_callback (tcp.pyx) and + # __pipe_connect_callback (pipe.pyx). + if exc is None: + self._init_protocol() + else: + if self._waiter is None: + self._fatal_error(exc, False, "connect failed") + elif self._waiter.cancelled(): + # Connect call was cancelled; just close the transport + # silently. + self._close() + elif self._waiter.done(): + self._fatal_error(exc, False, "connect failed") + else: + self._waiter.set_exception(exc) + self._close() + + # === Public API === + + def __repr__(self): + return '<{} closed={} reading={} {:#x}>'.format( + self.__class__.__name__, + self._closed, + self.__reading, + id(self)) + + def write(self, object buf): + self._ensure_alive() + + if self._eof: + raise RuntimeError('Cannot call write() after write_eof()') + if not buf: + return + if self._conn_lost: + self._conn_lost += 1 + return + self._buffer_write(buf) + self._initiate_write() + + def writelines(self, bufs): + self._ensure_alive() + + if self._eof: + raise RuntimeError('Cannot call writelines() after write_eof()') + if self._conn_lost: + self._conn_lost += 1 + return + for buf in bufs: + self._buffer_write(buf) + self._initiate_write() + + def write_eof(self): + self._ensure_alive() + + if self._eof: + return + + self._eof = 1 + if not self._get_write_buffer_size(): + self._shutdown() + + def can_write_eof(self): + return True + + def is_reading(self): + return self._is_reading() + + def pause_reading(self): + if self._closing or not self._is_reading(): + return + self._stop_reading() + + def resume_reading(self): + if self._is_reading() or self._closing: + return + self._start_reading() + + +cdef void __uv_stream_on_shutdown(uv.uv_shutdown_t* req, + int status) noexcept with gil: + + # callback for uv_shutdown + + if req.data is NULL: + aio_logger.error( + 'UVStream.shutdown callback called with NULL req.data, status=%r', + status) + return + + cdef UVStream stream = req.data + + if status < 0 and status != uv.UV_ECANCELED: + # From libuv source code: + # The ECANCELED error code is a lie, the shutdown(2) syscall is a + # fait accompli at this point. Maybe we should revisit this in + # v0.11. A possible reason for leaving it unchanged is that it + # informs the callee that the handle has been destroyed. + + if UVLOOP_DEBUG: + stream._loop._debug_stream_shutdown_errors_total += 1 + + exc = convert_error(status) + stream._fatal_error( + exc, False, "error status in uv_stream_t.shutdown callback") + return + + +cdef inline bint __uv_stream_on_read_common( + UVStream sc, + Loop loop, + ssize_t nread, +): + if sc._closed: + # The stream was closed, there is no reason to + # do any work now. + sc.__reading_stopped() # Just in case. + return True + + if nread == uv.UV_EOF: + # From libuv docs: + # The callee is responsible for stopping closing the stream + # when an error happens by calling uv_read_stop() or uv_close(). + # Trying to read from the stream again is undefined. 
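+        # Reading is stopped before EOF is dispatched to the protocol;
+        # the "finally: return True" below ensures the caller treats the
+        # EOF as fully handled even if _on_eof() raises.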
+ try: + if UVLOOP_DEBUG: + loop._debug_stream_read_eof_total += 1 + + sc._stop_reading() + sc._on_eof() + except BaseException as ex: + if UVLOOP_DEBUG: + loop._debug_stream_read_eof_cb_errors_total += 1 + + sc._fatal_error(ex, False) + finally: + return True + + if nread == 0: + # From libuv docs: + # nread might be 0, which does not indicate an error or EOF. + # This is equivalent to EAGAIN or EWOULDBLOCK under read(2). + return True + + if nread < 0: + # From libuv docs: + # The callee is responsible for stopping closing the stream + # when an error happens by calling uv_read_stop() or uv_close(). + # Trying to read from the stream again is undefined. + # + # Therefore, we're closing the stream. Since "UVHandle._close()" + # doesn't raise exceptions unless uvloop is built with DEBUG=1, + # we don't need try...finally here. + + if UVLOOP_DEBUG: + loop._debug_stream_read_errors_total += 1 + + if sc.__read_error_close: + # Used for getting notified when a pipe is closed. + # See WriteUnixTransport for the explanation. + sc._on_eof() + return True + + exc = convert_error(nread) + sc._fatal_error( + exc, False, "error status in uv_stream_t.read callback") + return True + + return False + + +cdef inline void __uv_stream_on_read_impl( + uv.uv_stream_t* stream, + ssize_t nread, + const uv.uv_buf_t* buf, +): + cdef: + UVStream sc = stream.data + Loop loop = sc._loop + + # It's OK to free the buffer early, since nothing will + # be able to touch it until this method is done. + __loop_free_buffer(loop) + + if __uv_stream_on_read_common(sc, loop, nread): + return + + try: + if UVLOOP_DEBUG: + loop._debug_stream_read_cb_total += 1 + + run_in_context1( + sc.context, + sc._protocol_data_received, + loop._recv_buffer[:nread], + ) + except BaseException as exc: + if UVLOOP_DEBUG: + loop._debug_stream_read_cb_errors_total += 1 + + sc._fatal_error(exc, False) + + +cdef inline void __uv_stream_on_write_impl( + uv.uv_write_t* req, + int status, +): + cdef: + _StreamWriteContext ctx = <_StreamWriteContext> req.data + UVStream stream = ctx.stream + + ctx.close() + + if stream._closed: + # The stream was closed, there is nothing to do. + # Even if there is an error, like EPIPE, there + # is no reason to report it. 
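+        # ctx.close() above has already released the Py_buffers and
+        # dropped the extra reference, so returning early leaks nothing.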
+ return + + if status < 0: + if UVLOOP_DEBUG: + stream._loop._debug_stream_write_errors_total += 1 + + exc = convert_error(status) + stream._fatal_error( + exc, False, "error status in uv_stream_t.write callback") + return + + try: + stream._on_write() + except BaseException as exc: + if UVLOOP_DEBUG: + stream._loop._debug_stream_write_cb_errors_total += 1 + + stream._fatal_error(exc, False) + + +cdef void __uv_stream_on_read( + uv.uv_stream_t* stream, + ssize_t nread, + const uv.uv_buf_t* buf, +) noexcept with gil: + + if __ensure_handle_data(stream, + "UVStream read callback") == 0: + return + + # Don't need try-finally, __uv_stream_on_read_impl is void + __uv_stream_on_read_impl(stream, nread, buf) + + +cdef void __uv_stream_on_write( + uv.uv_write_t* req, + int status, +) noexcept with gil: + + if UVLOOP_DEBUG: + if req.data is NULL: + aio_logger.error( + 'UVStream.write callback called with NULL req.data, status=%r', + status) + return + + # Don't need try-finally, __uv_stream_on_write_impl is void + __uv_stream_on_write_impl(req, status) + + +cdef void __uv_stream_buffered_alloc( + uv.uv_handle_t* stream, + size_t suggested_size, + uv.uv_buf_t* uvbuf, +) noexcept with gil: + + if __ensure_handle_data(stream, + "UVStream alloc buffer callback") == 0: + return + + cdef: + UVStream sc = stream.data + Loop loop = sc._loop + Py_buffer* pybuf = &sc._read_pybuf + int got_buf = 0 + + if sc._read_pybuf_acquired: + uvbuf.len = 0 + uvbuf.base = NULL + return + + sc._read_pybuf_acquired = 0 + try: + buf = run_in_context1( + sc.context, + sc._protocol_get_buffer, + suggested_size, + ) + PyObject_GetBuffer(buf, pybuf, PyBUF_WRITABLE) + got_buf = 1 + except BaseException as exc: + # Can't call 'sc._fatal_error' or 'sc._close', libuv will SF. + # We'll do it later in __uv_stream_buffered_on_read when we + # receive UV_ENOBUFS. + uvbuf.len = 0 + uvbuf.base = NULL + return + + if not pybuf.len: + uvbuf.len = 0 + uvbuf.base = NULL + if got_buf: + PyBuffer_Release(pybuf) + return + + sc._read_pybuf_acquired = 1 + uvbuf.base = pybuf.buf + uvbuf.len = pybuf.len + + +cdef void __uv_stream_buffered_on_read( + uv.uv_stream_t* stream, + ssize_t nread, + const uv.uv_buf_t* buf, +) noexcept with gil: + + if __ensure_handle_data(stream, + "UVStream buffered read callback") == 0: + return + + cdef: + UVStream sc = stream.data + Loop loop = sc._loop + Py_buffer* pybuf = &sc._read_pybuf + + if nread == uv.UV_ENOBUFS: + sc._fatal_error( + RuntimeError( + 'unhandled error (or an empty buffer) in get_buffer()'), + False) + return + + try: + if nread > 0 and not sc._read_pybuf_acquired: + # From libuv docs: + # nread is > 0 if there is data available or < 0 on error. When + # we’ve reached EOF, nread will be set to UV_EOF. When + # nread < 0, the buf parameter might not point to a valid + # buffer; in that case buf.len and buf.base are both set to 0. + raise RuntimeError( + f'no python buffer is allocated in on_read; nread={nread}') + + if nread == 0: + # From libuv docs: + # nread might be 0, which does not indicate an error or EOF. + # This is equivalent to EAGAIN or EWOULDBLOCK under read(2). 
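+            # The finally clause below still runs on this early return,
+            # releasing the buffer acquired in the alloc callback.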
+ return + + if __uv_stream_on_read_common(sc, loop, nread): + return + + if UVLOOP_DEBUG: + loop._debug_stream_read_cb_total += 1 + + run_in_context1(sc.context, sc._protocol_buffer_updated, nread) + except BaseException as exc: + if UVLOOP_DEBUG: + loop._debug_stream_read_cb_errors_total += 1 + + sc._fatal_error(exc, False) + finally: + sc._read_pybuf_acquired = 0 + PyBuffer_Release(pybuf) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/streamserver.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/streamserver.pxd new file mode 100644 index 0000000..a004efd --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/streamserver.pxd @@ -0,0 +1,26 @@ +cdef class UVStreamServer(UVSocketHandle): + cdef: + int backlog + object ssl + object ssl_handshake_timeout + object ssl_shutdown_timeout + object protocol_factory + bint opened + Server _server + + # All "inline" methods are final + + cdef inline _init(self, Loop loop, object protocol_factory, + Server server, + object backlog, + object ssl, + object ssl_handshake_timeout, + object ssl_shutdown_timeout) + + cdef inline _mark_as_open(self) + + cdef inline listen(self) + cdef inline _on_listen(self) + + cdef UVStream _make_new_transport(self, object protocol, object waiter, + object context) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/streamserver.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/streamserver.pyx new file mode 100644 index 0000000..9993317 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/streamserver.pyx @@ -0,0 +1,150 @@ +@cython.no_gc_clear +cdef class UVStreamServer(UVSocketHandle): + + def __cinit__(self): + self.opened = 0 + self._server = None + self.ssl = None + self.ssl_handshake_timeout = None + self.ssl_shutdown_timeout = None + self.protocol_factory = None + + cdef inline _init(self, Loop loop, object protocol_factory, + Server server, + object backlog, + object ssl, + object ssl_handshake_timeout, + object ssl_shutdown_timeout): + + if not isinstance(backlog, int): + # Don't allow floats + raise TypeError('integer argument expected, got {}'.format( + type(backlog).__name__)) + + if ssl is not None: + if not isinstance(ssl, ssl_SSLContext): + raise TypeError( + 'ssl is expected to be None or an instance of ' + 'ssl.SSLContext, got {!r}'.format(ssl)) + else: + if ssl_handshake_timeout is not None: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + if ssl_shutdown_timeout is not None: + raise ValueError( + 'ssl_shutdown_timeout is only meaningful with ssl') + + self.backlog = backlog + self.ssl = ssl + self.ssl_handshake_timeout = ssl_handshake_timeout + self.ssl_shutdown_timeout = ssl_shutdown_timeout + + self._start_init(loop) + self.protocol_factory = protocol_factory + self._server = server + + cdef inline listen(self): + cdef int err + self._ensure_alive() + + if self.protocol_factory is None: + raise RuntimeError('unable to listen(); no protocol_factory') + + if self.opened != 1: + raise RuntimeError('unopened TCPServer') + + self.context = Context_CopyCurrent() + + err = uv.uv_listen( self._handle, + self.backlog, + __uv_streamserver_on_listen) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + cdef inline _on_listen(self): + cdef UVStream client + + protocol = run_in_context(self.context, self.protocol_factory) + + if self.ssl is None: + client = self._make_new_transport(protocol, None, self.context) + + else: + waiter = self._loop._new_future() + + 
ssl_protocol = SSLProtocol( + self._loop, protocol, self.ssl, + waiter, + server_side=True, + server_hostname=None, + ssl_handshake_timeout=self.ssl_handshake_timeout, + ssl_shutdown_timeout=self.ssl_shutdown_timeout) + + client = self._make_new_transport(ssl_protocol, None, self.context) + + waiter.add_done_callback( + ft_partial(self.__on_ssl_connected, client)) + + client._accept(self) + + cdef _fatal_error(self, exc, throw, reason=None): + # Overload UVHandle._fatal_error + + self._close() + + if not isinstance(exc, OSError): + + if throw or self._loop is None: + raise exc + + msg = f'Fatal error on server {self.__class__.__name__}' + if reason is not None: + msg = f'{msg} ({reason})' + + self._loop.call_exception_handler({ + 'message': msg, + 'exception': exc, + }) + + cdef inline _mark_as_open(self): + self.opened = 1 + + cdef UVStream _make_new_transport(self, object protocol, object waiter, + object context): + raise NotImplementedError + + def __on_ssl_connected(self, transport, fut): + exc = fut.exception() + if exc is not None: + transport._force_close(exc) + + +cdef void __uv_streamserver_on_listen( + uv.uv_stream_t* handle, + int status, +) noexcept with gil: + + # callback for uv_listen + + if __ensure_handle_data(handle, + "UVStream listen callback") == 0: + return + + cdef: + UVStreamServer stream = handle.data + + if status < 0: + if UVLOOP_DEBUG: + stream._loop._debug_stream_listen_errors_total += 1 + + exc = convert_error(status) + stream._fatal_error( + exc, False, "error status in uv_stream_t.listen callback") + return + + try: + stream._on_listen() + except BaseException as exc: + stream._error(exc, False) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/tcp.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/tcp.pxd new file mode 100644 index 0000000..8d388ef --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/tcp.pxd @@ -0,0 +1,26 @@ +cdef class TCPServer(UVStreamServer): + cdef bind(self, system.sockaddr* addr, unsigned int flags=*) + + @staticmethod + cdef TCPServer new(Loop loop, object protocol_factory, Server server, + unsigned int flags, + object backlog, + object ssl, + object ssl_handshake_timeout, + object ssl_shutdown_timeout) + + +cdef class TCPTransport(UVStream): + cdef: + bint __peername_set + bint __sockname_set + system.sockaddr_storage __peername + system.sockaddr_storage __sockname + + cdef bind(self, system.sockaddr* addr, unsigned int flags=*) + cdef connect(self, system.sockaddr* addr) + cdef _set_nodelay(self) + + @staticmethod + cdef TCPTransport new(Loop loop, object protocol, Server server, + object waiter, object context) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/tcp.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/tcp.pyx new file mode 100644 index 0000000..d5fe827 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/tcp.pyx @@ -0,0 +1,228 @@ +cdef __tcp_init_uv_handle(UVStream handle, Loop loop, unsigned int flags): + cdef int err + + handle._handle = PyMem_RawMalloc(sizeof(uv.uv_tcp_t)) + if handle._handle is NULL: + handle._abort_init() + raise MemoryError() + + err = uv.uv_tcp_init_ex(handle._loop.uvloop, + handle._handle, + flags) + if err < 0: + handle._abort_init() + raise convert_error(err) + + handle._finish_init() + + +cdef __tcp_bind(UVStream handle, system.sockaddr* addr, unsigned int flags): + cdef int err + err = uv.uv_tcp_bind(handle._handle, + addr, flags) + if err < 0: + exc = convert_error(err) + raise exc + + +cdef 
__tcp_open(UVStream handle, int sockfd): + cdef int err + err = uv.uv_tcp_open(handle._handle, + sockfd) + if err < 0: + exc = convert_error(err) + raise exc + + +cdef __tcp_get_socket(UVSocketHandle handle): + cdef: + int buf_len = sizeof(system.sockaddr_storage) + int fileno + int err + system.sockaddr_storage buf + + fileno = handle._fileno() + + err = uv.uv_tcp_getsockname(handle._handle, + &buf, + &buf_len) + if err < 0: + raise convert_error(err) + + return PseudoSocket(buf.ss_family, uv.SOCK_STREAM, 0, fileno) + + +@cython.no_gc_clear +cdef class TCPServer(UVStreamServer): + + @staticmethod + cdef TCPServer new(Loop loop, object protocol_factory, Server server, + unsigned int flags, + object backlog, + object ssl, + object ssl_handshake_timeout, + object ssl_shutdown_timeout): + + cdef TCPServer handle + handle = TCPServer.__new__(TCPServer) + handle._init(loop, protocol_factory, server, backlog, + ssl, ssl_handshake_timeout, ssl_shutdown_timeout) + __tcp_init_uv_handle(handle, loop, flags) + return handle + + cdef _new_socket(self): + return __tcp_get_socket(self) + + cdef _open(self, int sockfd): + self._ensure_alive() + try: + __tcp_open(self, sockfd) + except Exception as exc: + self._fatal_error(exc, True) + else: + self._mark_as_open() + + cdef bind(self, system.sockaddr* addr, unsigned int flags=0): + self._ensure_alive() + try: + __tcp_bind(self, addr, flags) + except Exception as exc: + self._fatal_error(exc, True) + else: + self._mark_as_open() + + cdef UVStream _make_new_transport(self, object protocol, object waiter, + object context): + cdef TCPTransport tr + tr = TCPTransport.new(self._loop, protocol, self._server, waiter, + context) + return tr + + +@cython.no_gc_clear +cdef class TCPTransport(UVStream): + + @staticmethod + cdef TCPTransport new(Loop loop, object protocol, Server server, + object waiter, object context): + + cdef TCPTransport handle + handle = TCPTransport.__new__(TCPTransport) + handle._init(loop, protocol, server, waiter, context) + __tcp_init_uv_handle(handle, loop, uv.AF_UNSPEC) + handle.__peername_set = 0 + handle.__sockname_set = 0 + handle._set_nodelay() + return handle + + cdef _set_nodelay(self): + cdef int err + self._ensure_alive() + err = uv.uv_tcp_nodelay(self._handle, 1) + if err < 0: + raise convert_error(err) + + cdef _call_connection_made(self): + # asyncio saves peername & sockname when transports are instantiated, + # so that they're accessible even after the transport is closed. + # We are doing the same thing here, except that we create Python + # objects lazily, on request in get_extra_info() + + cdef: + int err + int buf_len + + buf_len = sizeof(system.sockaddr_storage) + err = uv.uv_tcp_getsockname(self._handle, + &self.__sockname, + &buf_len) + if err >= 0: + # Ignore errors, this is an optional thing. + # If something serious is going on, the transport + # will crash later (in roughly the same way how + # an asyncio transport would.) + self.__sockname_set = 1 + + buf_len = sizeof(system.sockaddr_storage) + err = uv.uv_tcp_getpeername(self._handle, + &self.__peername, + &buf_len) + if err >= 0: + # Same as few lines above -- we don't really care + # about error case here. 
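+            # Only the raw sockaddr_storage and a flag are cached here;
+            # get_extra_info() converts them to Python address tuples
+            # lazily, on request.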
+ self.__peername_set = 1 + + UVBaseTransport._call_connection_made(self) + + def get_extra_info(self, name, default=None): + if name == 'sockname': + if self.__sockname_set: + return __convert_sockaddr_to_pyaddr( + &self.__sockname) + elif name == 'peername': + if self.__peername_set: + return __convert_sockaddr_to_pyaddr( + &self.__peername) + return super().get_extra_info(name, default) + + cdef _new_socket(self): + return __tcp_get_socket(self) + + cdef bind(self, system.sockaddr* addr, unsigned int flags=0): + self._ensure_alive() + __tcp_bind(self, addr, flags) + + cdef _open(self, int sockfd): + self._ensure_alive() + __tcp_open(self, sockfd) + + cdef connect(self, system.sockaddr* addr): + cdef _TCPConnectRequest req + req = _TCPConnectRequest(self._loop, self) + req.connect(addr) + + +cdef class _TCPConnectRequest(UVRequest): + cdef: + TCPTransport transport + uv.uv_connect_t _req_data + + def __cinit__(self, loop, transport): + self.request = &self._req_data + self.request.data = self + self.transport = transport + + cdef connect(self, system.sockaddr* addr): + cdef int err + err = uv.uv_tcp_connect(self.request, + self.transport._handle, + addr, + __tcp_connect_callback) + if err < 0: + exc = convert_error(err) + self.on_done() + raise exc + + +cdef void __tcp_connect_callback( + uv.uv_connect_t* req, + int status, +) noexcept with gil: + cdef: + _TCPConnectRequest wrapper + TCPTransport transport + + wrapper = <_TCPConnectRequest> req.data + transport = wrapper.transport + + if status < 0: + exc = convert_error(status) + else: + exc = None + + try: + transport._on_connect(exc) + except BaseException as ex: + wrapper.transport._fatal_error(ex, False) + finally: + wrapper.on_done() diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/timer.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/timer.pxd new file mode 100644 index 0000000..fda23b6 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/timer.pxd @@ -0,0 +1,18 @@ +cdef class UVTimer(UVHandle): + cdef: + method_t callback + object ctx + bint running + uint64_t timeout + uint64_t start_t + + cdef _init(self, Loop loop, method_t callback, object ctx, + uint64_t timeout) + + cdef stop(self) + cdef start(self) + cdef get_when(self) + + @staticmethod + cdef UVTimer new(Loop loop, method_t callback, object ctx, + uint64_t timeout) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/timer.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/timer.pyx new file mode 100644 index 0000000..86d46ef --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/timer.pyx @@ -0,0 +1,89 @@ +@cython.no_gc_clear +cdef class UVTimer(UVHandle): + cdef _init(self, Loop loop, method_t callback, object ctx, + uint64_t timeout): + + cdef int err + + self._start_init(loop) + + self._handle = PyMem_RawMalloc(sizeof(uv.uv_timer_t)) + if self._handle is NULL: + self._abort_init() + raise MemoryError() + + err = uv.uv_timer_init(self._loop.uvloop, self._handle) + if err < 0: + self._abort_init() + raise convert_error(err) + + self._finish_init() + + self.callback = callback + self.ctx = ctx + self.running = 0 + self.timeout = timeout + self.start_t = 0 + + cdef stop(self): + cdef int err + + if not self._is_alive(): + self.running = 0 + return + + if self.running == 1: + err = uv.uv_timer_stop(self._handle) + self.running = 0 + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + + cdef start(self): + cdef int err + + self._ensure_alive() + + if 
self.running == 0: + # Update libuv internal time. + uv.uv_update_time(self._loop.uvloop) # void + self.start_t = uv.uv_now(self._loop.uvloop) + + err = uv.uv_timer_start(self._handle, + __uvtimer_callback, + self.timeout, 0) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + self.running = 1 + + cdef get_when(self): + return self.start_t + self.timeout + + @staticmethod + cdef UVTimer new(Loop loop, method_t callback, object ctx, + uint64_t timeout): + + cdef UVTimer handle + handle = UVTimer.__new__(UVTimer) + handle._init(loop, callback, ctx, timeout) + return handle + + +cdef void __uvtimer_callback( + uv.uv_timer_t* handle, +) noexcept with gil: + if __ensure_handle_data(handle, "UVTimer callback") == 0: + return + + cdef: + UVTimer timer = handle.data + method_t cb = timer.callback + + timer.running = 0 + try: + cb(timer.ctx) + except BaseException as ex: + timer._error(ex, False) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/udp.pxd b/hackaton/lib/python3.12/site-packages/uvloop/handles/udp.pxd new file mode 100644 index 0000000..daa9a1b --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/udp.pxd @@ -0,0 +1,22 @@ +cdef class UDPTransport(UVBaseTransport): + cdef: + bint __receiving + int _family + object _address + + cdef _init(self, Loop loop, unsigned int family) + cdef _set_address(self, system.addrinfo *addr) + + cdef _connect(self, system.sockaddr* addr, size_t addr_len) + + cdef _bind(self, system.sockaddr* addr) + cdef open(self, int family, int sockfd) + cdef _set_broadcast(self, bint on) + + cdef inline __receiving_started(self) + cdef inline __receiving_stopped(self) + + cdef _send(self, object data, object addr) + + cdef _on_receive(self, bytes data, object exc, object addr) + cdef _on_sent(self, object exc, object context=*) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/handles/udp.pyx b/hackaton/lib/python3.12/site-packages/uvloop/handles/udp.pyx new file mode 100644 index 0000000..ef20c3f --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/handles/udp.pyx @@ -0,0 +1,408 @@ +@cython.no_gc_clear +@cython.freelist(DEFAULT_FREELIST_SIZE) +cdef class _UDPSendContext: + # used to hold additional write request information for uv_write + + cdef: + uv.uv_udp_send_t req + + uv.uv_buf_t uv_buf + Py_buffer py_buf + + UDPTransport udp + + bint closed + + cdef close(self): + if self.closed: + return + + self.closed = 1 + PyBuffer_Release(&self.py_buf) # void + self.req.data = NULL + self.uv_buf.base = NULL + Py_DECREF(self) + self.udp = None + + @staticmethod + cdef _UDPSendContext new(UDPTransport udp, object data): + cdef _UDPSendContext ctx + ctx = _UDPSendContext.__new__(_UDPSendContext) + ctx.udp = None + ctx.closed = 1 + + ctx.req.data = ctx + Py_INCREF(ctx) + + PyObject_GetBuffer(data, &ctx.py_buf, PyBUF_SIMPLE) + ctx.uv_buf.base = ctx.py_buf.buf + ctx.uv_buf.len = ctx.py_buf.len + ctx.udp = udp + + ctx.closed = 0 + return ctx + + def __dealloc__(self): + if UVLOOP_DEBUG: + if not self.closed: + raise RuntimeError( + 'open _UDPSendContext is being deallocated') + self.udp = None + + +@cython.no_gc_clear +cdef class UDPTransport(UVBaseTransport): + def __cinit__(self): + self._family = uv.AF_UNSPEC + self.__receiving = 0 + self._address = None + self.context = Context_CopyCurrent() + + cdef _init(self, Loop loop, unsigned int family): + cdef int err + + self._start_init(loop) + + self._handle = PyMem_RawMalloc(sizeof(uv.uv_udp_t)) + if self._handle is NULL: + self._abort_init() + 
raise MemoryError() + + err = uv.uv_udp_init_ex(loop.uvloop, + self._handle, + family) + if err < 0: + self._abort_init() + raise convert_error(err) + + if family in (uv.AF_INET, uv.AF_INET6): + self._family = family + + self._finish_init() + + cdef _set_address(self, system.addrinfo *addr): + self._address = __convert_sockaddr_to_pyaddr(addr.ai_addr) + + cdef _connect(self, system.sockaddr* addr, size_t addr_len): + cdef int err + err = uv.uv_udp_connect(self._handle, addr) + if err < 0: + exc = convert_error(err) + raise exc + + cdef open(self, int family, int sockfd): + if family in (uv.AF_INET, uv.AF_INET6, uv.AF_UNIX): + self._family = family + else: + raise ValueError( + 'cannot open a UDP handle, invalid family {}'.format(family)) + + cdef int err + err = uv.uv_udp_open(self._handle, + sockfd) + + if err < 0: + exc = convert_error(err) + raise exc + + cdef _bind(self, system.sockaddr* addr): + cdef: + int err + int flags = 0 + + self._ensure_alive() + + err = uv.uv_udp_bind(self._handle, addr, flags) + if err < 0: + exc = convert_error(err) + raise exc + + cdef _set_broadcast(self, bint on): + cdef int err + + self._ensure_alive() + + err = uv.uv_udp_set_broadcast(self._handle, on) + if err < 0: + exc = convert_error(err) + raise exc + + cdef size_t _get_write_buffer_size(self): + if self._handle is NULL: + return 0 + return (self._handle).send_queue_size + + cdef bint _is_reading(self): + return self.__receiving + + cdef _start_reading(self): + cdef int err + + if self.__receiving: + return + + self._ensure_alive() + + err = uv.uv_udp_recv_start(self._handle, + __loop_alloc_buffer, + __uv_udp_on_receive) + + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + else: + # UDPTransport must live until the read callback is called + self.__receiving_started() + + cdef _stop_reading(self): + cdef int err + + if not self.__receiving: + return + + self._ensure_alive() + + err = uv.uv_udp_recv_stop(self._handle) + if err < 0: + exc = convert_error(err) + self._fatal_error(exc, True) + return + else: + self.__receiving_stopped() + + cdef inline __receiving_started(self): + if self.__receiving: + return + self.__receiving = 1 + Py_INCREF(self) + + cdef inline __receiving_stopped(self): + if not self.__receiving: + return + self.__receiving = 0 + Py_DECREF(self) + + cdef _new_socket(self): + if self._family not in (uv.AF_INET, uv.AF_INET6, uv.AF_UNIX): + raise RuntimeError( + 'UDPTransport.family is undefined; ' + 'cannot create python socket') + + fileno = self._fileno() + return PseudoSocket(self._family, uv.SOCK_DGRAM, 0, fileno) + + cdef _send(self, object data, object addr): + cdef: + _UDPSendContext ctx + system.sockaddr_storage saddr_st + system.sockaddr *saddr + Py_buffer try_pybuf + uv.uv_buf_t try_uvbuf + + self._ensure_alive() + + if self._family not in (uv.AF_INET, uv.AF_INET6, uv.AF_UNIX): + raise RuntimeError('UDPTransport.family is undefined; cannot send') + + if addr is None: + saddr = NULL + else: + try: + __convert_pyaddr_to_sockaddr(self._family, addr, + &saddr_st) + except (ValueError, TypeError): + raise + except Exception: + raise ValueError( + f'{addr!r}: socket family mismatch or ' + f'a DNS lookup is required') + saddr = (&saddr_st) + + if self._get_write_buffer_size() == 0: + PyObject_GetBuffer(data, &try_pybuf, PyBUF_SIMPLE) + try_uvbuf.base = try_pybuf.buf + try_uvbuf.len = try_pybuf.len + err = uv.uv_udp_try_send(self._handle, + &try_uvbuf, + 1, + saddr) + PyBuffer_Release(&try_pybuf) + else: + err = uv.UV_EAGAIN + + if err == 
uv.UV_EAGAIN: + ctx = _UDPSendContext.new(self, data) + err = uv.uv_udp_send(&ctx.req, + self._handle, + &ctx.uv_buf, + 1, + saddr, + __uv_udp_on_send) + + if err < 0: + ctx.close() + + exc = convert_error(err) + if isinstance(exc, OSError): + run_in_context1(self.context.copy(), self._protocol.error_received, exc) + else: + self._fatal_error(exc, True) + else: + self._maybe_pause_protocol() + + else: + self._on_sent(convert_error(err) if err < 0 else None, self.context.copy()) + + cdef _on_receive(self, bytes data, object exc, object addr): + if exc is None: + run_in_context2( + self.context, self._protocol.datagram_received, data, addr, + ) + else: + run_in_context1(self.context, self._protocol.error_received, exc) + + cdef _on_sent(self, object exc, object context=None): + if exc is not None: + if isinstance(exc, OSError): + if context is None: + context = self.context + run_in_context1(context, self._protocol.error_received, exc) + else: + self._fatal_error( + exc, False, 'Fatal write error on datagram transport') + + self._maybe_resume_protocol() + if not self._get_write_buffer_size(): + if self._closing: + self._schedule_call_connection_lost(None) + + # === Public API === + + def sendto(self, data, addr=None): + if not data: + # Replicating asyncio logic here. + return + + if self._address: + if addr not in (None, self._address): + # Replicating asyncio logic here. + raise ValueError( + 'Invalid address: must be None or %s' % (self._address,)) + + # Instead of setting addr to self._address below like what asyncio + # does, we depend on previous uv_udp_connect() to set the address + addr = None + + if self._conn_lost: + # Replicating asyncio logic here. + if self._conn_lost >= LOG_THRESHOLD_FOR_CONNLOST_WRITES: + aio_logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + self._send(data, addr) + + +cdef void __uv_udp_on_receive( + uv.uv_udp_t* handle, + ssize_t nread, + const uv.uv_buf_t* buf, + const system.sockaddr* addr, + unsigned flags +) noexcept with gil: + + if __ensure_handle_data(handle, + "UDPTransport receive callback") == 0: + return + + cdef: + UDPTransport udp = handle.data + Loop loop = udp._loop + bytes data + object pyaddr + + # It's OK to free the buffer early, since nothing will + # be able to touch it until this method is done. + __loop_free_buffer(loop) + + if udp._closed: + # The handle was closed, there is no reason to + # do any work now. + udp.__receiving_stopped() # Just in case. + return + + if addr is NULL and nread == 0: + # From libuv docs: + # addr: struct sockaddr* containing the address + # of the sender. Can be NULL. Valid for the duration + # of the callback only. + # [...] + # The receive callback will be called with + # nread == 0 and addr == NULL when there is + # nothing to read, and with nread == 0 and + # addr != NULL when an empty UDP packet is + # received. 
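+        # This early return covers the "nothing to read" case; an empty
+        # datagram (nread == 0 with addr != NULL) falls through below
+        # and is delivered to the protocol as b''.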
+ return + + if addr is NULL: + pyaddr = None + elif addr.sa_family == uv.AF_UNSPEC: + # https://github.com/MagicStack/uvloop/issues/304 + if system.PLATFORM_IS_LINUX: + pyaddr = None + else: + pyaddr = '' + else: + try: + pyaddr = __convert_sockaddr_to_pyaddr(addr) + except BaseException as exc: + udp._error(exc, False) + return + + if nread < 0: + exc = convert_error(nread) + udp._on_receive(None, exc, pyaddr) + return + + if nread == 0: + data = b'' + else: + data = loop._recv_buffer[:nread] + + try: + udp._on_receive(data, None, pyaddr) + except BaseException as exc: + udp._error(exc, False) + + +cdef void __uv_udp_on_send( + uv.uv_udp_send_t* req, + int status, +) noexcept with gil: + + if req.data is NULL: + # Shouldn't happen as: + # - _UDPSendContext does an extra INCREF in its 'init()' + # - _UDPSendContext holds a ref to the relevant UDPTransport + aio_logger.error( + 'UVStream.write callback called with NULL req.data, status=%r', + status) + return + + cdef: + _UDPSendContext ctx = <_UDPSendContext> req.data + UDPTransport udp = ctx.udp + + ctx.close() + + if status < 0: + exc = convert_error(status) + print(exc) + else: + exc = None + + try: + udp._on_sent(exc) + except BaseException as exc: + udp._error(exc, False) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/includes/__init__.py b/hackaton/lib/python3.12/site-packages/uvloop/includes/__init__.py new file mode 100644 index 0000000..2ccf9ca --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/includes/__init__.py @@ -0,0 +1,23 @@ +# flake8: noqa + +# These have to be synced with the stdlib.pxi +import asyncio +import collections +import concurrent.futures +import errno +import functools +import gc +import inspect +import itertools +import os +import signal +import socket +import subprocess +import ssl +import stat +import sys +import threading +import traceback +import time +import warnings +import weakref diff --git a/hackaton/lib/python3.12/site-packages/uvloop/includes/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/uvloop/includes/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..c158852 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/uvloop/includes/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/uvloop/includes/consts.pxi b/hackaton/lib/python3.12/site-packages/uvloop/includes/consts.pxi new file mode 100644 index 0000000..82f3c32 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/includes/consts.pxi @@ -0,0 +1,33 @@ +cdef enum: + UV_STREAM_RECV_BUF_SIZE = 256000 # 250kb + + FLOW_CONTROL_HIGH_WATER = 64 # KiB + FLOW_CONTROL_HIGH_WATER_SSL_READ = 256 # KiB + FLOW_CONTROL_HIGH_WATER_SSL_WRITE = 512 # KiB + + DEFAULT_FREELIST_SIZE = 250 + DNS_PYADDR_TO_SOCKADDR_CACHE_SIZE = 2048 + + DEBUG_STACK_DEPTH = 10 + + + __PROCESS_DEBUG_SLEEP_AFTER_FORK = 1 + + + LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5 + SSL_READ_MAX_SIZE = 256 * 1024 + + +cdef extern from *: + ''' + // Number of seconds to wait for SSL handshake to complete + // The default timeout matches that of Nginx. 
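+    // (Re-exported to Cython below as `const float`; used as the
+    // default when no explicit ssl_handshake_timeout is supplied.)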
+ #define SSL_HANDSHAKE_TIMEOUT 60.0 + + // Number of seconds to wait for SSL shutdown to complete + // The default timeout mimics lingering_time + #define SSL_SHUTDOWN_TIMEOUT 30.0 + ''' + + const float SSL_HANDSHAKE_TIMEOUT + const float SSL_SHUTDOWN_TIMEOUT diff --git a/hackaton/lib/python3.12/site-packages/uvloop/includes/debug.pxd b/hackaton/lib/python3.12/site-packages/uvloop/includes/debug.pxd new file mode 100644 index 0000000..a825def --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/includes/debug.pxd @@ -0,0 +1,3 @@ +cdef extern from "includes/debug.h": + + cdef int UVLOOP_DEBUG diff --git a/hackaton/lib/python3.12/site-packages/uvloop/includes/flowcontrol.pxd b/hackaton/lib/python3.12/site-packages/uvloop/includes/flowcontrol.pxd new file mode 100644 index 0000000..f22f1a7 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/includes/flowcontrol.pxd @@ -0,0 +1,23 @@ +# flake8: noqa + + +cdef inline add_flowcontrol_defaults(high, low, int kb): + cdef int h, l + if high is None: + if low is None: + h = kb * 1024 + else: + l = low + h = 4 * l + else: + h = high + if low is None: + l = h // 4 + else: + l = low + + if not h >= l >= 0: + raise ValueError('high (%r) must be >= low (%r) must be >= 0' % + (h, l)) + + return h, l diff --git a/hackaton/lib/python3.12/site-packages/uvloop/includes/python.pxd b/hackaton/lib/python3.12/site-packages/uvloop/includes/python.pxd new file mode 100644 index 0000000..94007e5 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/includes/python.pxd @@ -0,0 +1,31 @@ +cdef extern from "Python.h": + int PY_VERSION_HEX + + unicode PyUnicode_FromString(const char *) + + void* PyMem_RawMalloc(size_t n) nogil + void* PyMem_RawRealloc(void *p, size_t n) nogil + void* PyMem_RawCalloc(size_t nelem, size_t elsize) nogil + void PyMem_RawFree(void *p) nogil + + object PyUnicode_EncodeFSDefault(object) + void PyErr_SetInterrupt() nogil + + object PyMemoryView_FromMemory(char *mem, ssize_t size, int flags) + object PyMemoryView_FromObject(object obj) + int PyMemoryView_Check(object obj) + + cdef enum: + PyBUF_WRITE + + +cdef extern from "includes/compat.h": + object Context_CopyCurrent() + int Context_Enter(object) except -1 + int Context_Exit(object) except -1 + + void PyOS_BeforeFork() + void PyOS_AfterFork_Parent() + void PyOS_AfterFork_Child() + + void _Py_RestoreSignals() diff --git a/hackaton/lib/python3.12/site-packages/uvloop/includes/stdlib.pxi b/hackaton/lib/python3.12/site-packages/uvloop/includes/stdlib.pxi new file mode 100644 index 0000000..4152b8a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/includes/stdlib.pxi @@ -0,0 +1,176 @@ +# flake8: noqa + + +import asyncio, asyncio.log, asyncio.base_events, \ + asyncio.sslproto, asyncio.coroutines, \ + asyncio.futures, asyncio.transports +import collections.abc +import concurrent.futures +import errno +import functools +import gc +import inspect +import itertools +import os +import signal +import socket +import subprocess +import ssl +import stat +import sys +import threading +import traceback +import time +import warnings +import weakref + + +cdef aio_get_event_loop = asyncio.get_event_loop +cdef aio_CancelledError = asyncio.CancelledError +cdef aio_InvalidStateError = asyncio.InvalidStateError +cdef aio_TimeoutError = asyncio.TimeoutError +cdef aio_Future = asyncio.Future +cdef aio_Task = asyncio.Task +cdef aio_ensure_future = asyncio.ensure_future +cdef aio_gather = asyncio.gather +cdef aio_wait = asyncio.wait +cdef aio_wrap_future = 
asyncio.wrap_future +cdef aio_logger = asyncio.log.logger +cdef aio_iscoroutine = asyncio.iscoroutine +cdef aio_iscoroutinefunction = asyncio.iscoroutinefunction +cdef aio_BaseProtocol = asyncio.BaseProtocol +cdef aio_Protocol = asyncio.Protocol +cdef aio_isfuture = getattr(asyncio, 'isfuture', None) +cdef aio_get_running_loop = getattr(asyncio, '_get_running_loop', None) +cdef aio_set_running_loop = getattr(asyncio, '_set_running_loop', None) +cdef aio_debug_wrapper = getattr(asyncio.coroutines, 'debug_wrapper', None) +cdef aio_AbstractChildWatcher = asyncio.AbstractChildWatcher +cdef aio_Transport = asyncio.Transport +cdef aio_FlowControlMixin = asyncio.transports._FlowControlMixin + +cdef col_deque = collections.deque +cdef col_Iterable = collections.abc.Iterable +cdef col_Counter = collections.Counter +cdef col_OrderedDict = collections.OrderedDict + +cdef cc_ThreadPoolExecutor = concurrent.futures.ThreadPoolExecutor +cdef cc_Future = concurrent.futures.Future + +cdef errno_EBADF = errno.EBADF +cdef errno_EINVAL = errno.EINVAL + +cdef ft_partial = functools.partial + +cdef gc_disable = gc.disable + +cdef iter_chain = itertools.chain +cdef inspect_isgenerator = inspect.isgenerator + +cdef int has_IPV6_V6ONLY = hasattr(socket, 'IPV6_V6ONLY') +cdef int IPV6_V6ONLY = getattr(socket, 'IPV6_V6ONLY', -1) +cdef int has_SO_REUSEPORT = hasattr(socket, 'SO_REUSEPORT') +cdef int SO_REUSEPORT = getattr(socket, 'SO_REUSEPORT', 0) +cdef int SO_BROADCAST = getattr(socket, 'SO_BROADCAST') +cdef int SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', -1) +cdef int socket_AI_CANONNAME = getattr(socket, 'AI_CANONNAME') + +cdef socket_gaierror = socket.gaierror +cdef socket_error = socket.error +cdef socket_timeout = socket.timeout +cdef socket_socket = socket.socket +cdef socket_socketpair = socket.socketpair +cdef socket_getservbyname = socket.getservbyname +cdef socket_AddressFamily = socket.AddressFamily +cdef socket_SocketKind = socket.SocketKind + +cdef int socket_EAI_ADDRFAMILY = getattr(socket, 'EAI_ADDRFAMILY', -1) +cdef int socket_EAI_AGAIN = getattr(socket, 'EAI_AGAIN', -1) +cdef int socket_EAI_BADFLAGS = getattr(socket, 'EAI_BADFLAGS', -1) +cdef int socket_EAI_BADHINTS = getattr(socket, 'EAI_BADHINTS', -1) +cdef int socket_EAI_CANCELED = getattr(socket, 'EAI_CANCELED', -1) +cdef int socket_EAI_FAIL = getattr(socket, 'EAI_FAIL', -1) +cdef int socket_EAI_FAMILY = getattr(socket, 'EAI_FAMILY', -1) +cdef int socket_EAI_MEMORY = getattr(socket, 'EAI_MEMORY', -1) +cdef int socket_EAI_NODATA = getattr(socket, 'EAI_NODATA', -1) +cdef int socket_EAI_NONAME = getattr(socket, 'EAI_NONAME', -1) +cdef int socket_EAI_OVERFLOW = getattr(socket, 'EAI_OVERFLOW', -1) +cdef int socket_EAI_PROTOCOL = getattr(socket, 'EAI_PROTOCOL', -1) +cdef int socket_EAI_SERVICE = getattr(socket, 'EAI_SERVICE', -1) +cdef int socket_EAI_SOCKTYPE = getattr(socket, 'EAI_SOCKTYPE', -1) + + +cdef str os_name = os.name +cdef os_environ = os.environ +cdef os_dup = os.dup +cdef os_set_inheritable = os.set_inheritable +cdef os_get_inheritable = os.get_inheritable +cdef os_close = os.close +cdef os_open = os.open +cdef os_devnull = os.devnull +cdef os_O_RDWR = os.O_RDWR +cdef os_pipe = os.pipe +cdef os_read = os.read +cdef os_remove = os.remove +cdef os_stat = os.stat +cdef os_unlink = os.unlink +cdef os_fspath = os.fspath + +cdef stat_S_ISSOCK = stat.S_ISSOCK + +cdef sys_ignore_environment = sys.flags.ignore_environment +cdef sys_dev_mode = sys.flags.dev_mode +cdef sys_exc_info = sys.exc_info +cdef sys_set_coroutine_wrapper = getattr(sys, 
'set_coroutine_wrapper', None) +cdef sys_get_coroutine_wrapper = getattr(sys, 'get_coroutine_wrapper', None) +cdef sys_getframe = sys._getframe +cdef sys_version_info = sys.version_info +cdef sys_getfilesystemencoding = sys.getfilesystemencoding +cdef str sys_platform = sys.platform + +cdef ssl_SSLContext = ssl.SSLContext +cdef ssl_MemoryBIO = ssl.MemoryBIO +cdef ssl_create_default_context = ssl.create_default_context +cdef ssl_SSLError = ssl.SSLError +cdef ssl_SSLAgainErrors = (ssl.SSLWantReadError, ssl.SSLSyscallError) +cdef ssl_SSLZeroReturnError = ssl.SSLZeroReturnError +cdef ssl_CertificateError = ssl.CertificateError +cdef int ssl_SSL_ERROR_WANT_READ = ssl.SSL_ERROR_WANT_READ +cdef int ssl_SSL_ERROR_WANT_WRITE = ssl.SSL_ERROR_WANT_WRITE +cdef int ssl_SSL_ERROR_SYSCALL = ssl.SSL_ERROR_SYSCALL + +cdef threading_Thread = threading.Thread +cdef threading_main_thread = threading.main_thread + +cdef int subprocess_PIPE = subprocess.PIPE +cdef int subprocess_STDOUT = subprocess.STDOUT +cdef int subprocess_DEVNULL = subprocess.DEVNULL +cdef subprocess_SubprocessError = subprocess.SubprocessError + +cdef int signal_NSIG = signal.NSIG +cdef signal_signal = signal.signal +cdef signal_siginterrupt = signal.siginterrupt +cdef signal_set_wakeup_fd = signal.set_wakeup_fd +cdef signal_default_int_handler = signal.default_int_handler +cdef signal_SIG_DFL = signal.SIG_DFL + +cdef time_sleep = time.sleep +cdef time_monotonic = time.monotonic + +cdef tb_StackSummary = traceback.StackSummary +cdef tb_walk_stack = traceback.walk_stack +cdef tb_format_list = traceback.format_list + +cdef warnings_warn = warnings.warn + +cdef weakref_WeakValueDictionary = weakref.WeakValueDictionary +cdef weakref_WeakSet = weakref.WeakSet + +cdef py_inf = float('inf') + + +# Cython doesn't clean-up imported objects properly in Py3 mode, +# so we delete refs to all modules manually (except sys) +del asyncio, concurrent, collections, errno +del functools, inspect, itertools, socket, os, threading +del signal, subprocess, ssl +del time, traceback, warnings, weakref diff --git a/hackaton/lib/python3.12/site-packages/uvloop/includes/system.pxd b/hackaton/lib/python3.12/site-packages/uvloop/includes/system.pxd new file mode 100644 index 0000000..367fedd --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/includes/system.pxd @@ -0,0 +1,96 @@ +from libc.stdint cimport int8_t, uint64_t + +cdef extern from "arpa/inet.h" nogil: + + int ntohl(int) + int htonl(int) + int ntohs(int) + + +cdef extern from "sys/socket.h" nogil: + + struct sockaddr: + unsigned short sa_family + char sa_data[14] + + struct addrinfo: + int ai_flags + int ai_family + int ai_socktype + int ai_protocol + size_t ai_addrlen + sockaddr* ai_addr + char* ai_canonname + addrinfo* ai_next + + struct sockaddr_in: + unsigned short sin_family + unsigned short sin_port + # ... + + struct sockaddr_in6: + unsigned short sin6_family + unsigned short sin6_port + unsigned long sin6_flowinfo + # ... + unsigned long sin6_scope_id + + struct sockaddr_storage: + unsigned short ss_family + # ... + + const char *gai_strerror(int errcode) + + int socketpair(int domain, int type, int protocol, int socket_vector[2]) + + int setsockopt(int socket, int level, int option_name, + const void *option_value, int option_len) + + +cdef extern from "sys/un.h" nogil: + + struct sockaddr_un: + unsigned short sun_family + char* sun_path + # ... 
+ + +cdef extern from "unistd.h" nogil: + + ssize_t write(int fd, const void *buf, size_t count) + void _exit(int status) + + +cdef extern from "pthread.h": + + int pthread_atfork( + void (*prepare)(), + void (*parent)(), + void (*child)()) + + +cdef extern from "includes/compat.h" nogil: + + cdef int EWOULDBLOCK + + cdef int PLATFORM_IS_APPLE + cdef int PLATFORM_IS_LINUX + + struct epoll_event: + # We don't use the fields + pass + + int EPOLL_CTL_DEL + int epoll_ctl(int epfd, int op, int fd, epoll_event *event) + object MakeUnixSockPyAddr(sockaddr_un *addr) + + +cdef extern from "includes/fork_handler.h": + + uint64_t MAIN_THREAD_ID + int8_t MAIN_THREAD_ID_SET + ctypedef void (*OnForkHandler)() + void handleAtFork() + void setForkHandler(OnForkHandler handler) + void resetForkHandler() + void setMainThreadID(uint64_t id) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/includes/uv.pxd b/hackaton/lib/python3.12/site-packages/uvloop/includes/uv.pxd new file mode 100644 index 0000000..510b149 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/includes/uv.pxd @@ -0,0 +1,506 @@ +from libc.stdint cimport uint16_t, uint32_t, uint64_t, int64_t +from posix.types cimport gid_t, uid_t +from posix.unistd cimport getuid + +from . cimport system + +# This is an internal enum UV_HANDLE_READABLE from uv-common.h, used only by +# handles/pipe.pyx to temporarily workaround a libuv issue libuv/libuv#2058, +# before there is a proper fix in libuv. In short, libuv disallowed feeding a +# write-only pipe to uv_read_start(), which was needed by uvloop to detect a +# broken pipe without having to send anything on the write-only end. We're +# setting UV_HANDLE_READABLE on pipe_t to workaround this limitation +# temporarily, please see also #317. +cdef enum: + UV_INTERNAL_HANDLE_READABLE = 0x00004000 + +cdef extern from "uv.h" nogil: + cdef int UV_TCP_IPV6ONLY + + cdef int UV_EACCES + cdef int UV_EAGAIN + cdef int UV_EALREADY + cdef int UV_EBUSY + cdef int UV_ECONNABORTED + cdef int UV_ECONNREFUSED + cdef int UV_ECONNRESET + cdef int UV_ECANCELED + cdef int UV_EEXIST + cdef int UV_EINTR + cdef int UV_EINVAL + cdef int UV_EISDIR + cdef int UV_ENOENT + cdef int UV_EOF + cdef int UV_EPERM + cdef int UV_EPIPE + cdef int UV_ESHUTDOWN + cdef int UV_ESRCH + cdef int UV_ETIMEDOUT + cdef int UV_EBADF + cdef int UV_ENOBUFS + + cdef int UV_EAI_ADDRFAMILY + cdef int UV_EAI_AGAIN + cdef int UV_EAI_BADFLAGS + cdef int UV_EAI_BADHINTS + cdef int UV_EAI_CANCELED + cdef int UV_EAI_FAIL + cdef int UV_EAI_FAMILY + cdef int UV_EAI_MEMORY + cdef int UV_EAI_NODATA + cdef int UV_EAI_NONAME + cdef int UV_EAI_OVERFLOW + cdef int UV_EAI_PROTOCOL + cdef int UV_EAI_SERVICE + cdef int UV_EAI_SOCKTYPE + + cdef int SOL_SOCKET + cdef int SO_ERROR + cdef int SO_REUSEADDR + # use has_SO_REUSEPORT and SO_REUSEPORT in stdlib.pxi instead + cdef int AF_INET + cdef int AF_INET6 + cdef int AF_UNIX + cdef int AF_UNSPEC + cdef int AI_PASSIVE + cdef int AI_NUMERICHOST + cdef int INET6_ADDRSTRLEN + cdef int IPPROTO_IPV6 + cdef int SOCK_STREAM + cdef int SOCK_DGRAM + cdef int IPPROTO_TCP + cdef int IPPROTO_UDP + + cdef int SIGINT + cdef int SIGHUP + cdef int SIGCHLD + cdef int SIGKILL + cdef int SIGTERM + + ctypedef int uv_os_sock_t + ctypedef int uv_file + ctypedef int uv_os_fd_t + + ctypedef struct uv_buf_t: + char* base + size_t len + + ctypedef struct uv_loop_t: + void* data + # ... + + ctypedef struct uv_handle_t: + void* data + uv_loop_t* loop + unsigned int flags + # ... 
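+    # The "# ..." markers mean each struct is declared only partially:
+    # just the fields uvloop touches from Cython. The C compiler still
+    # compiles against the full definitions from uv.h, so sizeof() and
+    # struct layout remain correct.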
+ + ctypedef struct uv_idle_t: + void* data + uv_loop_t* loop + # ... + + ctypedef struct uv_check_t: + void* data + uv_loop_t* loop + # ... + + ctypedef struct uv_signal_t: + void* data + uv_loop_t* loop + # ... + + ctypedef struct uv_async_t: + void* data + uv_loop_t* loop + # ... + + ctypedef struct uv_timer_t: + void* data + uv_loop_t* loop + # ... + + ctypedef struct uv_stream_t: + void* data + size_t write_queue_size + uv_loop_t* loop + # ... + + ctypedef struct uv_tcp_t: + void* data + uv_loop_t* loop + # ... + + ctypedef struct uv_pipe_t: + void* data + uv_loop_t* loop + # ... + + ctypedef struct uv_udp_t: + void* data + uv_loop_t* loop + size_t send_queue_size + size_t send_queue_count + # ... + + ctypedef struct uv_udp_send_t: + void* data + uv_udp_t* handle + + ctypedef struct uv_poll_t: + void* data + uv_loop_t* loop + # ... + + ctypedef struct uv_req_t: + # Only cancellation of uv_fs_t, uv_getaddrinfo_t, + # uv_getnameinfo_t and uv_work_t requests is + # currently supported. + void* data + uv_req_type type + # ... + + ctypedef struct uv_connect_t: + void* data + + ctypedef struct uv_getaddrinfo_t: + void* data + # ... + + ctypedef struct uv_getnameinfo_t: + void* data + # ... + + ctypedef struct uv_write_t: + void* data + # ... + + ctypedef struct uv_shutdown_t: + void* data + # ... + + ctypedef struct uv_process_t: + void* data + int pid + # ... + + ctypedef struct uv_fs_event_t: + void* data + # ... + + ctypedef enum uv_req_type: + UV_UNKNOWN_REQ = 0, + UV_REQ, + UV_CONNECT, + UV_WRITE, + UV_SHUTDOWN, + UV_UDP_SEND, + UV_FS, + UV_WORK, + UV_GETADDRINFO, + UV_GETNAMEINFO, + UV_REQ_TYPE_PRIVATE, + UV_REQ_TYPE_MAX + + ctypedef enum uv_run_mode: + UV_RUN_DEFAULT = 0, + UV_RUN_ONCE, + UV_RUN_NOWAIT + + ctypedef enum uv_poll_event: + UV_READABLE = 1, + UV_WRITABLE = 2, + UV_DISCONNECT = 4 + + ctypedef enum uv_udp_flags: + UV_UDP_IPV6ONLY = 1, + UV_UDP_PARTIAL = 2 + + ctypedef enum uv_membership: + UV_LEAVE_GROUP = 0, + UV_JOIN_GROUP + + cdef enum uv_fs_event: + UV_RENAME = 1, + UV_CHANGE = 2 + + const char* uv_strerror(int err) + const char* uv_err_name(int err) + + ctypedef void (*uv_walk_cb)(uv_handle_t* handle, void* arg) with gil + + ctypedef void (*uv_close_cb)(uv_handle_t* handle) with gil + ctypedef void (*uv_idle_cb)(uv_idle_t* handle) with gil + ctypedef void (*uv_check_cb)(uv_check_t* handle) with gil + ctypedef void (*uv_signal_cb)(uv_signal_t* handle, int signum) with gil + ctypedef void (*uv_async_cb)(uv_async_t* handle) with gil + ctypedef void (*uv_timer_cb)(uv_timer_t* handle) with gil + ctypedef void (*uv_connection_cb)(uv_stream_t* server, int status) with gil + ctypedef void (*uv_alloc_cb)(uv_handle_t* handle, + size_t suggested_size, + uv_buf_t* buf) with gil + ctypedef void (*uv_read_cb)(uv_stream_t* stream, + ssize_t nread, + const uv_buf_t* buf) with gil + ctypedef void (*uv_write_cb)(uv_write_t* req, int status) with gil + ctypedef void (*uv_getaddrinfo_cb)(uv_getaddrinfo_t* req, + int status, + system.addrinfo* res) with gil + ctypedef void (*uv_getnameinfo_cb)(uv_getnameinfo_t* req, + int status, + const char* hostname, + const char* service) with gil + ctypedef void (*uv_shutdown_cb)(uv_shutdown_t* req, int status) with gil + ctypedef void (*uv_poll_cb)(uv_poll_t* handle, + int status, int events) with gil + + ctypedef void (*uv_connect_cb)(uv_connect_t* req, int status) with gil + + ctypedef void (*uv_udp_send_cb)(uv_udp_send_t* req, int status) with gil + ctypedef void (*uv_udp_recv_cb)(uv_udp_t* handle, + ssize_t nread, + const uv_buf_t* buf, + const 
system.sockaddr* addr, + unsigned flags) with gil + ctypedef void (*uv_fs_event_cb)(uv_fs_event_t* handle, + const char *filename, + int events, + int status) with gil + + # Generic request functions + int uv_cancel(uv_req_t* req) + + # Generic handler functions + int uv_is_active(const uv_handle_t* handle) + void uv_close(uv_handle_t* handle, uv_close_cb close_cb) + int uv_is_closing(const uv_handle_t* handle) + int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) + void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) + + # Loop functions + int uv_loop_init(uv_loop_t* loop) + int uv_loop_close(uv_loop_t* loop) + int uv_loop_alive(uv_loop_t* loop) + int uv_loop_fork(uv_loop_t* loop) + uv_os_fd_t uv_backend_fd(uv_loop_t* loop) + + void uv_update_time(uv_loop_t* loop) + uint64_t uv_now(const uv_loop_t*) + + int uv_run(uv_loop_t*, uv_run_mode mode) nogil + void uv_stop(uv_loop_t*) + + # Idle handler + int uv_idle_init(uv_loop_t*, uv_idle_t* idle) + int uv_idle_start(uv_idle_t* idle, uv_idle_cb cb) + int uv_idle_stop(uv_idle_t* idle) + + # Check handler + int uv_check_init(uv_loop_t*, uv_check_t* idle) + int uv_check_start(uv_check_t* check, uv_check_cb cb) + int uv_check_stop(uv_check_t* check) + + # Signal handler + int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) + int uv_signal_start(uv_signal_t* handle, + uv_signal_cb signal_cb, + int signum) + int uv_signal_stop(uv_signal_t* handle) + + # Async handler + int uv_async_init(uv_loop_t*, + uv_async_t* async_, + uv_async_cb async_cb) + int uv_async_send(uv_async_t* async_) + + # Timer handler + int uv_timer_init(uv_loop_t*, uv_timer_t* handle) + int uv_timer_start(uv_timer_t* handle, + uv_timer_cb cb, + uint64_t timeout, + uint64_t repeat) + int uv_timer_stop(uv_timer_t* handle) + + # DNS + int uv_getaddrinfo(uv_loop_t* loop, + uv_getaddrinfo_t* req, + uv_getaddrinfo_cb getaddrinfo_cb, + const char* node, + const char* service, + const system.addrinfo* hints) + + void uv_freeaddrinfo(system.addrinfo* ai) + + int uv_getnameinfo(uv_loop_t* loop, + uv_getnameinfo_t* req, + uv_getnameinfo_cb getnameinfo_cb, + const system.sockaddr* addr, + int flags) + + int uv_ip4_name(const system.sockaddr_in* src, char* dst, size_t size) + int uv_ip6_name(const system.sockaddr_in6* src, char* dst, size_t size) + + # Streams + + int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) + int uv_accept(uv_stream_t* server, uv_stream_t* client) + int uv_read_start(uv_stream_t* stream, + uv_alloc_cb alloc_cb, + uv_read_cb read_cb) + int uv_read_stop(uv_stream_t*) + int uv_write(uv_write_t* req, uv_stream_t* handle, + uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb) + + int uv_try_write(uv_stream_t* handle, uv_buf_t bufs[], unsigned int nbufs) + + int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) + + int uv_is_readable(const uv_stream_t* handle) + int uv_is_writable(const uv_stream_t* handle) + + # TCP + + int uv_tcp_init_ex(uv_loop_t*, uv_tcp_t* handle, unsigned int flags) + int uv_tcp_nodelay(uv_tcp_t* handle, int enable) + int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) + int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) + int uv_tcp_bind(uv_tcp_t* handle, system.sockaddr* addr, + unsigned int flags) + + int uv_tcp_getsockname(const uv_tcp_t* handle, system.sockaddr* name, + int* namelen) + int uv_tcp_getpeername(const uv_tcp_t* handle, system.sockaddr* name, + int* namelen) + + int uv_tcp_connect(uv_connect_t* req, uv_tcp_t* handle, + const system.sockaddr* addr, 
uv_connect_cb cb) + + # Pipes + + int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) + int uv_pipe_open(uv_pipe_t* handle, uv_os_fd_t file) + int uv_pipe_bind(uv_pipe_t* handle, const char* name) + + void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle, + const char* name, uv_connect_cb cb) + + # UDP + + int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned int flags) + int uv_udp_connect(uv_udp_t* handle, const system.sockaddr* addr) + int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) + int uv_udp_bind(uv_udp_t* handle, const system.sockaddr* addr, + unsigned int flags) + int uv_udp_send(uv_udp_send_t* req, uv_udp_t* handle, + const uv_buf_t bufs[], unsigned int nbufs, + const system.sockaddr* addr, uv_udp_send_cb send_cb) + int uv_udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], unsigned int nbufs, + const system.sockaddr* addr) + int uv_udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb, + uv_udp_recv_cb recv_cb) + int uv_udp_recv_stop(uv_udp_t* handle) + int uv_udp_set_broadcast(uv_udp_t* handle, int on) + + # Polling + + int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) + int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle, + uv_os_sock_t socket) + int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) + int uv_poll_stop(uv_poll_t* poll) + + # FS Event + + int uv_fs_event_init(uv_loop_t *loop, uv_fs_event_t *handle) + int uv_fs_event_start(uv_fs_event_t *handle, uv_fs_event_cb cb, + const char *path, unsigned int flags) + int uv_fs_event_stop(uv_fs_event_t *handle) + + # Misc + + ctypedef struct uv_timeval_t: + long tv_sec + long tv_usec + + ctypedef struct uv_rusage_t: + uv_timeval_t ru_utime # user CPU time used + uv_timeval_t ru_stime # system CPU time used + uint64_t ru_maxrss # maximum resident set size + uint64_t ru_ixrss # integral shared memory size + uint64_t ru_idrss # integral unshared data size + uint64_t ru_isrss # integral unshared stack size + uint64_t ru_minflt # page reclaims (soft page faults) + uint64_t ru_majflt # page faults (hard page faults) + uint64_t ru_nswap # swaps + uint64_t ru_inblock # block input operations + uint64_t ru_oublock # block output operations + uint64_t ru_msgsnd # IPC messages sent + uint64_t ru_msgrcv # IPC messages received + uint64_t ru_nsignals # signals received + uint64_t ru_nvcsw # voluntary context switches + uint64_t ru_nivcsw # involuntary context switches + + int uv_getrusage(uv_rusage_t* rusage) + + int uv_ip4_addr(const char* ip, int port, system.sockaddr_in* addr) + int uv_ip6_addr(const char* ip, int port, system.sockaddr_in6* addr) + + # Memory Allocation + + ctypedef void* (*uv_malloc_func)(size_t size) + ctypedef void* (*uv_realloc_func)(void* ptr, size_t size) + ctypedef void* (*uv_calloc_func)(size_t count, size_t size) + ctypedef void (*uv_free_func)(void* ptr) + + int uv_replace_allocator(uv_malloc_func malloc_func, + uv_realloc_func realloc_func, + uv_calloc_func calloc_func, + uv_free_func free_func) + + # Process + + ctypedef void (*uv_exit_cb)(uv_process_t*, int64_t exit_status, + int term_signal) with gil + + ctypedef enum uv_process_flags: + UV_PROCESS_SETUID = 1, + UV_PROCESS_SETGID = 2, + UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS = 4, + UV_PROCESS_DETACHED = 8, + UV_PROCESS_WINDOWS_HIDE = 16 + + ctypedef enum uv_stdio_flags: + UV_IGNORE = 0x00, + UV_CREATE_PIPE = 0x01, + UV_INHERIT_FD = 0x02, + UV_INHERIT_STREAM = 0x04, + UV_READABLE_PIPE = 0x10, + UV_WRITABLE_PIPE = 0x20 + + ctypedef union uv_stdio_container_data_u: + uv_stream_t* stream + int fd + + 
ctypedef struct uv_stdio_container_t: + uv_stdio_flags flags + uv_stdio_container_data_u data + + ctypedef struct uv_process_options_t: + uv_exit_cb exit_cb + char* file + char** args + char** env + char* cwd + unsigned int flags + int stdio_count + uv_stdio_container_t* stdio + uid_t uid + gid_t gid + + int uv_spawn(uv_loop_t* loop, uv_process_t* handle, + const uv_process_options_t* options) + + int uv_process_kill(uv_process_t* handle, int signum) + + unsigned int uv_version() diff --git a/hackaton/lib/python3.12/site-packages/uvloop/loop.cpython-312-darwin.so b/hackaton/lib/python3.12/site-packages/uvloop/loop.cpython-312-darwin.so new file mode 100755 index 0000000..13f7da0 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/uvloop/loop.cpython-312-darwin.so differ diff --git a/hackaton/lib/python3.12/site-packages/uvloop/loop.pxd b/hackaton/lib/python3.12/site-packages/uvloop/loop.pxd new file mode 100644 index 0000000..01e39ae --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/loop.pxd @@ -0,0 +1,230 @@ +# cython: language_level=3 + + +from .includes cimport uv +from .includes cimport system + +from libc.stdint cimport uint64_t, uint32_t, int64_t + + +include "includes/consts.pxi" + + +cdef extern from *: + ctypedef int vint "volatile int" + + +cdef class UVHandle +cdef class UVSocketHandle(UVHandle) + +cdef class UVAsync(UVHandle) +cdef class UVTimer(UVHandle) +cdef class UVIdle(UVHandle) + +cdef class UVBaseTransport(UVSocketHandle) + +ctypedef object (*method_t)(object) +ctypedef object (*method1_t)(object, object) +ctypedef object (*method2_t)(object, object, object) +ctypedef object (*method3_t)(object, object, object, object) + + +cdef class Loop: + cdef: + uv.uv_loop_t *uvloop + + bint _coroutine_debug_set + int _coroutine_origin_tracking_saved_depth + + public slow_callback_duration + + readonly bint _closed + bint _debug + bint _running + bint _stopping + + uint64_t _thread_id + + object _task_factory + object _exception_handler + object _default_executor + object _ready + set _queued_streams, _executing_streams + Py_ssize_t _ready_len + + set _servers + + object _transports + set _processes + dict _fd_to_reader_fileobj + dict _fd_to_writer_fileobj + dict _unix_server_sockets + + set _signals + dict _signal_handlers + object _ssock + object _csock + bint _listening_signals + int _old_signal_wakeup_id + + set _timers + dict _polls + + UVProcess active_process_handler + + UVAsync handler_async + UVIdle handler_idle + UVCheck handler_check__exec_writes + + object _last_error + + cdef object __weakref__ + + object _asyncgens + bint _asyncgens_shutdown_called + + bint _executor_shutdown_called + + char _recv_buffer[UV_STREAM_RECV_BUF_SIZE] + bint _recv_buffer_in_use + + # DEBUG fields + # True when compiled with DEBUG. + # Used only in unittests. 
+ readonly bint _debug_cc + + readonly object _debug_handles_total + readonly object _debug_handles_closed + readonly object _debug_handles_current + + readonly uint64_t _debug_uv_handles_total + readonly uint64_t _debug_uv_handles_freed + + readonly uint64_t _debug_cb_handles_total + readonly uint64_t _debug_cb_handles_count + readonly uint64_t _debug_cb_timer_handles_total + readonly uint64_t _debug_cb_timer_handles_count + + readonly uint64_t _debug_stream_shutdown_errors_total + readonly uint64_t _debug_stream_listen_errors_total + + readonly uint64_t _debug_stream_read_cb_total + readonly uint64_t _debug_stream_read_cb_errors_total + readonly uint64_t _debug_stream_read_eof_total + readonly uint64_t _debug_stream_read_eof_cb_errors_total + readonly uint64_t _debug_stream_read_errors_total + + readonly uint64_t _debug_stream_write_tries + readonly uint64_t _debug_stream_write_errors_total + readonly uint64_t _debug_stream_write_ctx_total + readonly uint64_t _debug_stream_write_ctx_cnt + readonly uint64_t _debug_stream_write_cb_errors_total + + readonly uint64_t _poll_read_events_total + readonly uint64_t _poll_read_cb_errors_total + readonly uint64_t _poll_write_events_total + readonly uint64_t _poll_write_cb_errors_total + + readonly uint64_t _sock_try_write_total + + readonly uint64_t _debug_exception_handler_cnt + + cdef _init_debug_fields(self) + + cdef _on_wake(self) + cdef _on_idle(self) + + cdef __run(self, uv.uv_run_mode) + cdef _run(self, uv.uv_run_mode) + + cdef _close(self) + cdef _stop(self, exc) + cdef uint64_t _time(self) + + cdef inline _queue_write(self, UVStream stream) + cdef _exec_queued_writes(self) + + cdef inline _call_soon(self, object callback, object args, object context) + cdef inline _append_ready_handle(self, Handle handle) + cdef inline _call_soon_handle(self, Handle handle) + + cdef _call_later(self, uint64_t delay, object callback, object args, + object context) + + cdef void _handle_exception(self, object ex) + + cdef inline _is_main_thread(self) + + cdef inline _new_future(self) + cdef inline _check_signal(self, sig) + cdef inline _check_closed(self) + cdef inline _check_thread(self) + + cdef _getaddrinfo(self, object host, object port, + int family, int type, + int proto, int flags, + int unpack) + + cdef _getnameinfo(self, system.sockaddr *addr, int flags) + + cdef _track_transport(self, UVBaseTransport transport) + cdef _fileobj_to_fd(self, fileobj) + cdef _ensure_fd_no_transport(self, fd) + + cdef _track_process(self, UVProcess proc) + cdef _untrack_process(self, UVProcess proc) + + cdef _add_reader(self, fd, Handle handle) + cdef _has_reader(self, fd) + cdef _remove_reader(self, fd) + + cdef _add_writer(self, fd, Handle handle) + cdef _has_writer(self, fd) + cdef _remove_writer(self, fd) + + cdef _sock_recv(self, fut, sock, n) + cdef _sock_recv_into(self, fut, sock, buf) + cdef _sock_sendall(self, fut, sock, data) + cdef _sock_accept(self, fut, sock) + + cdef _sock_connect(self, sock, address) + cdef _sock_connect_cb(self, fut, sock, address) + + cdef _sock_set_reuseport(self, int fd) + + cdef _setup_or_resume_signals(self) + cdef _shutdown_signals(self) + cdef _pause_signals(self) + + cdef _handle_signal(self, sig) + cdef _read_from_self(self) + cdef inline _ceval_process_signals(self) + cdef _invoke_signals(self, bytes data) + + cdef _set_coroutine_debug(self, bint enabled) + + cdef _print_debug_info(self) + + +include "cbhandles.pxd" + +include "handles/handle.pxd" +include "handles/async_.pxd" +include "handles/idle.pxd" +include 
"handles/check.pxd" +include "handles/timer.pxd" +include "handles/poll.pxd" +include "handles/basetransport.pxd" +include "handles/stream.pxd" +include "handles/streamserver.pxd" +include "handles/tcp.pxd" +include "handles/pipe.pxd" +include "handles/process.pxd" +include "handles/fsevent.pxd" + +include "request.pxd" +include "sslproto.pxd" + +include "handles/udp.pxd" + +include "server.pxd" diff --git a/hackaton/lib/python3.12/site-packages/uvloop/loop.pyi b/hackaton/lib/python3.12/site-packages/uvloop/loop.pyi new file mode 100644 index 0000000..9c8c462 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/loop.pyi @@ -0,0 +1,297 @@ +import asyncio +import ssl +import sys +from socket import AddressFamily, SocketKind, _Address, _RetAddress, socket +from typing import ( + IO, + Any, + Awaitable, + Callable, + Dict, + Generator, + List, + Optional, + Sequence, + Tuple, + TypeVar, + Union, + overload, +) + +_T = TypeVar('_T') +_Context = Dict[str, Any] +_ExceptionHandler = Callable[[asyncio.AbstractEventLoop, _Context], Any] +_SSLContext = Union[bool, None, ssl.SSLContext] +_ProtocolT = TypeVar("_ProtocolT", bound=asyncio.BaseProtocol) + +class Loop: + def call_soon( + self, callback: Callable[..., Any], *args: Any, context: Optional[Any] = ... + ) -> asyncio.Handle: ... + def call_soon_threadsafe( + self, callback: Callable[..., Any], *args: Any, context: Optional[Any] = ... + ) -> asyncio.Handle: ... + def call_later( + self, delay: float, callback: Callable[..., Any], *args: Any, context: Optional[Any] = ... + ) -> asyncio.TimerHandle: ... + def call_at( + self, when: float, callback: Callable[..., Any], *args: Any, context: Optional[Any] = ... + ) -> asyncio.TimerHandle: ... + def time(self) -> float: ... + def stop(self) -> None: ... + def run_forever(self) -> None: ... + def close(self) -> None: ... + def get_debug(self) -> bool: ... + def set_debug(self, enabled: bool) -> None: ... + def is_running(self) -> bool: ... + def is_closed(self) -> bool: ... + def create_future(self) -> asyncio.Future[Any]: ... + def create_task( + self, + coro: Union[Awaitable[_T], Generator[Any, None, _T]], + *, + name: Optional[str] = ..., + ) -> asyncio.Task[_T]: ... + def set_task_factory( + self, + factory: Optional[ + Callable[[asyncio.AbstractEventLoop, Generator[Any, None, _T]], asyncio.Future[_T]] + ], + ) -> None: ... + def get_task_factory( + self, + ) -> Optional[ + Callable[[asyncio.AbstractEventLoop, Generator[Any, None, _T]], asyncio.Future[_T]] + ]: ... + @overload + def run_until_complete(self, future: Generator[Any, None, _T]) -> _T: ... + @overload + def run_until_complete(self, future: Awaitable[_T]) -> _T: ... + async def getaddrinfo( + self, + host: Optional[Union[str, bytes]], + port: Optional[Union[str, bytes, int]], + *, + family: int = ..., + type: int = ..., + proto: int = ..., + flags: int = ..., + ) -> List[ + Tuple[ + AddressFamily, + SocketKind, + int, + str, + Union[Tuple[str, int], Tuple[str, int, int, int]], + ] + ]: ... + async def getnameinfo( + self, + sockaddr: Union[ + Tuple[str, int], + Tuple[str, int, int], + Tuple[str, int, int, int] + ], + flags: int = ..., + ) -> Tuple[str, str]: ... + async def start_tls( + self, + transport: asyncio.BaseTransport, + protocol: asyncio.BaseProtocol, + sslcontext: ssl.SSLContext, + *, + server_side: bool = ..., + server_hostname: Optional[str] = ..., + ssl_handshake_timeout: Optional[float] = ..., + ssl_shutdown_timeout: Optional[float] = ..., + ) -> asyncio.BaseTransport: ... 
+ @overload + async def create_server( + self, + protocol_factory: asyncio.events._ProtocolFactory, + host: Optional[Union[str, Sequence[str]]] = ..., + port: int = ..., + *, + family: int = ..., + flags: int = ..., + sock: None = ..., + backlog: int = ..., + ssl: _SSLContext = ..., + reuse_address: Optional[bool] = ..., + reuse_port: Optional[bool] = ..., + ssl_handshake_timeout: Optional[float] = ..., + ssl_shutdown_timeout: Optional[float] = ..., + start_serving: bool = ..., + ) -> asyncio.AbstractServer: ... + @overload + async def create_server( + self, + protocol_factory: asyncio.events._ProtocolFactory, + host: None = ..., + port: None = ..., + *, + family: int = ..., + flags: int = ..., + sock: socket = ..., + backlog: int = ..., + ssl: _SSLContext = ..., + reuse_address: Optional[bool] = ..., + reuse_port: Optional[bool] = ..., + ssl_handshake_timeout: Optional[float] = ..., + ssl_shutdown_timeout: Optional[float] = ..., + start_serving: bool = ..., + ) -> asyncio.AbstractServer: ... + @overload + async def create_connection( + self, + protocol_factory: Callable[[], _ProtocolT], + host: str = ..., + port: int = ..., + *, + ssl: _SSLContext = ..., + family: int = ..., + proto: int = ..., + flags: int = ..., + sock: None = ..., + local_addr: Optional[Tuple[str, int]] = ..., + server_hostname: Optional[str] = ..., + ssl_handshake_timeout: Optional[float] = ..., + ssl_shutdown_timeout: Optional[float] = ..., + ) -> tuple[asyncio.BaseProtocol, _ProtocolT]: ... + @overload + async def create_connection( + self, + protocol_factory: Callable[[], _ProtocolT], + host: None = ..., + port: None = ..., + *, + ssl: _SSLContext = ..., + family: int = ..., + proto: int = ..., + flags: int = ..., + sock: socket, + local_addr: None = ..., + server_hostname: Optional[str] = ..., + ssl_handshake_timeout: Optional[float] = ..., + ssl_shutdown_timeout: Optional[float] = ..., + ) -> tuple[asyncio.BaseProtocol, _ProtocolT]: ... + async def create_unix_server( + self, + protocol_factory: asyncio.events._ProtocolFactory, + path: Optional[str] = ..., + *, + backlog: int = ..., + sock: Optional[socket] = ..., + ssl: _SSLContext = ..., + ssl_handshake_timeout: Optional[float] = ..., + ssl_shutdown_timeout: Optional[float] = ..., + start_serving: bool = ..., + ) -> asyncio.AbstractServer: ... + async def create_unix_connection( + self, + protocol_factory: Callable[[], _ProtocolT], + path: Optional[str] = ..., + *, + ssl: _SSLContext = ..., + sock: Optional[socket] = ..., + server_hostname: Optional[str] = ..., + ssl_handshake_timeout: Optional[float] = ..., + ssl_shutdown_timeout: Optional[float] = ..., + ) -> tuple[asyncio.BaseProtocol, _ProtocolT]: ... + def default_exception_handler(self, context: _Context) -> None: ... + def get_exception_handler(self) -> Optional[_ExceptionHandler]: ... + def set_exception_handler(self, handler: Optional[_ExceptionHandler]) -> None: ... + def call_exception_handler(self, context: _Context) -> None: ... + def add_reader(self, fd: Any, callback: Callable[..., Any], *args: Any) -> None: ... + def remove_reader(self, fd: Any) -> None: ... + def add_writer(self, fd: Any, callback: Callable[..., Any], *args: Any) -> None: ... + def remove_writer(self, fd: Any) -> None: ... + async def sock_recv(self, sock: socket, nbytes: int) -> bytes: ... + async def sock_recv_into(self, sock: socket, buf: bytearray) -> int: ... + async def sock_sendall(self, sock: socket, data: bytes) -> None: ... + async def sock_accept(self, sock: socket) -> Tuple[socket, _RetAddress]: ... 
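The `sock_*` stubs above form the low-level non-blocking socket API; sockets handed to them must already be in non-blocking mode. A minimal single-connection echo sketch using them (host and port are illustrative):

import asyncio
import socket

async def echo_once(host: str = "127.0.0.1", port: int = 8888) -> None:
    loop = asyncio.get_running_loop()
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen()
    srv.setblocking(False)  # required by the loop.sock_*() methods
    conn, _addr = await loop.sock_accept(srv)
    with srv, conn:
        data = await loop.sock_recv(conn, 1024)
        await loop.sock_sendall(conn, data)  # echo the payload back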
+ async def sock_connect(self, sock: socket, address: _Address) -> None: ... + async def sock_recvfrom(self, sock: socket, bufsize: int) -> bytes: ... + async def sock_recvfrom_into(self, sock: socket, buf: bytearray, nbytes: int = ...) -> int: ... + async def sock_sendto(self, sock: socket, data: bytes, address: _Address) -> None: ... + async def connect_accepted_socket( + self, + protocol_factory: Callable[[], _ProtocolT], + sock: socket, + *, + ssl: _SSLContext = ..., + ssl_handshake_timeout: Optional[float] = ..., + ssl_shutdown_timeout: Optional[float] = ..., + ) -> tuple[asyncio.BaseProtocol, _ProtocolT]: ... + async def run_in_executor( + self, executor: Any, func: Callable[..., _T], *args: Any + ) -> _T: ... + def set_default_executor(self, executor: Any) -> None: ... + async def subprocess_shell( + self, + protocol_factory: Callable[[], _ProtocolT], + cmd: Union[bytes, str], + *, + stdin: Any = ..., + stdout: Any = ..., + stderr: Any = ..., + **kwargs: Any, + ) -> tuple[asyncio.BaseProtocol, _ProtocolT]: ... + async def subprocess_exec( + self, + protocol_factory: Callable[[], _ProtocolT], + *args: Any, + stdin: Any = ..., + stdout: Any = ..., + stderr: Any = ..., + **kwargs: Any, + ) -> tuple[asyncio.BaseProtocol, _ProtocolT]: ... + async def connect_read_pipe( + self, protocol_factory: Callable[[], _ProtocolT], pipe: Any + ) -> tuple[asyncio.BaseProtocol, _ProtocolT]: ... + async def connect_write_pipe( + self, protocol_factory: Callable[[], _ProtocolT], pipe: Any + ) -> tuple[asyncio.BaseProtocol, _ProtocolT]: ... + def add_signal_handler( + self, sig: int, callback: Callable[..., Any], *args: Any + ) -> None: ... + def remove_signal_handler(self, sig: int) -> bool: ... + async def create_datagram_endpoint( + self, + protocol_factory: Callable[[], _ProtocolT], + local_addr: Optional[Tuple[str, int]] = ..., + remote_addr: Optional[Tuple[str, int]] = ..., + *, + family: int = ..., + proto: int = ..., + flags: int = ..., + reuse_address: Optional[bool] = ..., + reuse_port: Optional[bool] = ..., + allow_broadcast: Optional[bool] = ..., + sock: Optional[socket] = ..., + ) -> tuple[asyncio.BaseProtocol, _ProtocolT]: ... + async def shutdown_asyncgens(self) -> None: ... + async def shutdown_default_executor( + self, + timeout: Optional[float] = ..., + ) -> None: ... + # Loop doesn't implement these, but since they are marked as abstract in typeshed, + # we have to put them in so mypy thinks the base methods are overridden + async def sendfile( + self, + transport: asyncio.BaseTransport, + file: IO[bytes], + offset: int = ..., + count: Optional[int] = ..., + *, + fallback: bool = ..., + ) -> int: ... + async def sock_sendfile( + self, + sock: socket, + file: IO[bytes], + offset: int = ..., + count: Optional[int] = ..., + *, + fallback: bool = ... + ) -> int: ... 
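The stub file above describes the public surface of the Cython `Loop`. The usual way to get one is to install uvloop's event loop policy before starting asyncio (recent uvloop releases also expose `uvloop.run()` as a shortcut); a minimal sketch:

import asyncio
import uvloop

async def main() -> None:
    loop = asyncio.get_running_loop()
    # With the policy installed, this is the uvloop Loop whose
    # methods the stub file above annotates.
    print(type(loop))

if __name__ == "__main__":
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    asyncio.run(main())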
diff --git a/hackaton/lib/python3.12/site-packages/uvloop/loop.pyx b/hackaton/lib/python3.12/site-packages/uvloop/loop.pyx new file mode 100644 index 0000000..f9a5a23 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/loop.pyx @@ -0,0 +1,3424 @@ +# cython: language_level=3, embedsignature=True + +import asyncio +cimport cython + +from .includes.debug cimport UVLOOP_DEBUG +from .includes cimport uv +from .includes cimport system +from .includes.python cimport ( + PY_VERSION_HEX, + PyMem_RawMalloc, PyMem_RawFree, + PyMem_RawCalloc, PyMem_RawRealloc, + PyUnicode_EncodeFSDefault, + PyErr_SetInterrupt, + _Py_RestoreSignals, + Context_CopyCurrent, + Context_Enter, + Context_Exit, + PyMemoryView_FromMemory, PyBUF_WRITE, + PyMemoryView_FromObject, PyMemoryView_Check, + PyOS_AfterFork_Parent, PyOS_AfterFork_Child, + PyOS_BeforeFork, + PyUnicode_FromString +) +from .includes.flowcontrol cimport add_flowcontrol_defaults + +from libc.stdint cimport uint64_t +from libc.string cimport memset, strerror, memcpy +from libc cimport errno + +from cpython cimport PyObject +from cpython cimport PyErr_CheckSignals, PyErr_Occurred +from cpython cimport PyThread_get_thread_ident +from cpython cimport Py_INCREF, Py_DECREF, Py_XDECREF, Py_XINCREF +from cpython cimport ( + PyObject_GetBuffer, PyBuffer_Release, PyBUF_SIMPLE, + Py_buffer, PyBytes_AsString, PyBytes_CheckExact, + PyBytes_AsStringAndSize, + Py_SIZE, PyBytes_AS_STRING, PyBUF_WRITABLE +) +from cpython.pycapsule cimport PyCapsule_New, PyCapsule_GetPointer + +from . import _noop + + +include "includes/stdlib.pxi" + +include "errors.pyx" + +cdef: + int PY39 = PY_VERSION_HEX >= 0x03090000 + int PY311 = PY_VERSION_HEX >= 0x030b0000 + int PY313 = PY_VERSION_HEX >= 0x030d0000 + uint64_t MAX_SLEEP = 3600 * 24 * 365 * 100 + + +cdef _is_sock_stream(sock_type): + if SOCK_NONBLOCK == -1: + return sock_type == uv.SOCK_STREAM + else: + # Linux's socket.type is a bitmask that can include extra info + # about socket (like SOCK_NONBLOCK bit), therefore we can't do simple + # `sock_type == socket.SOCK_STREAM`, see + # https://github.com/torvalds/linux/blob/v4.13/include/linux/net.h#L77 + # for more details. + return (sock_type & 0xF) == uv.SOCK_STREAM + + +cdef _is_sock_dgram(sock_type): + if SOCK_NONBLOCK == -1: + return sock_type == uv.SOCK_DGRAM + else: + # Read the comment in `_is_sock_stream`. + return (sock_type & 0xF) == uv.SOCK_DGRAM + + +cdef isfuture(obj): + if aio_isfuture is None: + return isinstance(obj, aio_Future) + else: + return aio_isfuture(obj) + + +cdef inline socket_inc_io_ref(sock): + if isinstance(sock, socket_socket): + sock._io_refs += 1 + + +cdef inline socket_dec_io_ref(sock): + if isinstance(sock, socket_socket): + sock._decref_socketios() + + +cdef inline run_in_context(context, method): + # This method is internally used to workaround a reference issue that in + # certain circumstances, inlined context.run() will not hold a reference to + # the given method instance, which - if deallocated - will cause segfault. 
+ # See also: edgedb/edgedb#2222 + Py_INCREF(method) + try: + return context.run(method) + finally: + Py_DECREF(method) + + +cdef inline run_in_context1(context, method, arg): + Py_INCREF(method) + try: + return context.run(method, arg) + finally: + Py_DECREF(method) + + +cdef inline run_in_context2(context, method, arg1, arg2): + Py_INCREF(method) + try: + return context.run(method, arg1, arg2) + finally: + Py_DECREF(method) + + +# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s +# *reuse_address* parameter +_unset = object() + + +@cython.no_gc_clear +cdef class Loop: + def __cinit__(self): + cdef int err + + # Install PyMem* memory allocators if they aren't installed yet. + __install_pymem() + + # Install pthread_atfork handlers + __install_atfork() + + self.uvloop = PyMem_RawMalloc(sizeof(uv.uv_loop_t)) + if self.uvloop is NULL: + raise MemoryError() + + self.slow_callback_duration = 0.1 + + self._closed = 0 + self._debug = 0 + self._thread_id = 0 + self._running = 0 + self._stopping = 0 + + self._transports = weakref_WeakValueDictionary() + self._processes = set() + + # Used to keep a reference (and hence keep the fileobj alive) + # for as long as its registered by add_reader or add_writer. + # This is how the selector module and hence asyncio behaves. + self._fd_to_reader_fileobj = {} + self._fd_to_writer_fileobj = {} + + self._unix_server_sockets = {} + + self._timers = set() + self._polls = {} + + self._recv_buffer_in_use = 0 + + err = uv.uv_loop_init(self.uvloop) + if err < 0: + raise convert_error(err) + self.uvloop.data = self + + self._init_debug_fields() + + self.active_process_handler = None + + self._last_error = None + + self._task_factory = None + self._exception_handler = None + self._default_executor = None + + self._queued_streams = set() + self._executing_streams = set() + self._ready = col_deque() + self._ready_len = 0 + + self.handler_async = UVAsync.new( + self, self._on_wake, self) + + self.handler_idle = UVIdle.new( + self, + new_MethodHandle( + self, "loop._on_idle", self._on_idle, None, self)) + + # Needed to call `UVStream._exec_write` for writes scheduled + # during `Protocol.data_received`. + self.handler_check__exec_writes = UVCheck.new( + self, + new_MethodHandle( + self, "loop._exec_queued_writes", + self._exec_queued_writes, None, self)) + + self._signals = set() + self._ssock = self._csock = None + self._signal_handlers = {} + self._listening_signals = False + self._old_signal_wakeup_id = -1 + + self._coroutine_debug_set = False + + # A weak set of all asynchronous generators that are + # being iterated by the loop. + self._asyncgens = weakref_WeakSet() + + # Set to True when `loop.shutdown_asyncgens` is called. + self._asyncgens_shutdown_called = False + # Set to True when `loop.shutdown_default_executor` is called. 
+ self._executor_shutdown_called = False + + self._servers = set() + + cdef inline _is_main_thread(self): + cdef uint64_t main_thread_id = system.MAIN_THREAD_ID + if system.MAIN_THREAD_ID_SET == 0: + main_thread_id = threading_main_thread().ident + system.setMainThreadID(main_thread_id) + return main_thread_id == PyThread_get_thread_ident() + + def __init__(self): + self.set_debug( + sys_dev_mode or (not sys_ignore_environment + and bool(os_environ.get('PYTHONASYNCIODEBUG')))) + + def __dealloc__(self): + if self._running == 1: + raise RuntimeError('deallocating a running event loop!') + if self._closed == 0: + aio_logger.error("deallocating an open event loop") + return + PyMem_RawFree(self.uvloop) + self.uvloop = NULL + + cdef _init_debug_fields(self): + self._debug_cc = bool(UVLOOP_DEBUG) + + if UVLOOP_DEBUG: + self._debug_handles_current = col_Counter() + self._debug_handles_closed = col_Counter() + self._debug_handles_total = col_Counter() + else: + self._debug_handles_current = None + self._debug_handles_closed = None + self._debug_handles_total = None + + self._debug_uv_handles_total = 0 + self._debug_uv_handles_freed = 0 + + self._debug_stream_read_cb_total = 0 + self._debug_stream_read_eof_total = 0 + self._debug_stream_read_errors_total = 0 + self._debug_stream_read_cb_errors_total = 0 + self._debug_stream_read_eof_cb_errors_total = 0 + + self._debug_stream_shutdown_errors_total = 0 + self._debug_stream_listen_errors_total = 0 + + self._debug_stream_write_tries = 0 + self._debug_stream_write_errors_total = 0 + self._debug_stream_write_ctx_total = 0 + self._debug_stream_write_ctx_cnt = 0 + self._debug_stream_write_cb_errors_total = 0 + + self._debug_cb_handles_total = 0 + self._debug_cb_handles_count = 0 + + self._debug_cb_timer_handles_total = 0 + self._debug_cb_timer_handles_count = 0 + + self._poll_read_events_total = 0 + self._poll_read_cb_errors_total = 0 + self._poll_write_events_total = 0 + self._poll_write_cb_errors_total = 0 + + self._sock_try_write_total = 0 + + self._debug_exception_handler_cnt = 0 + + cdef _setup_or_resume_signals(self): + if not self._is_main_thread(): + return + + if self._listening_signals: + raise RuntimeError('signals handling has been already setup') + + if self._ssock is not None: + raise RuntimeError('self-pipe exists before loop run') + + # Create a self-pipe and call set_signal_wakeup_fd() with one + # of its ends. This is needed so that libuv knows that it needs + # to wake up on ^C (no matter if the SIGINT handler is still the + # standard Python's one or the user set their own.) + + self._ssock, self._csock = socket_socketpair() + try: + self._ssock.setblocking(False) + self._csock.setblocking(False) + + fileno = self._csock.fileno() + + self._old_signal_wakeup_id = _set_signal_wakeup_fd(fileno) + except Exception: + # Out of all statements in the try block, only the + # "_set_signal_wakeup_fd()" call can fail, but it shouldn't, + # as we ensure that the current thread is the main thread. + # Still, if something goes horribly wrong we want to clean up + # the socket pair.
+ self._ssock.close() + self._csock.close() + self._ssock = None + self._csock = None + raise + + self._add_reader( + self._ssock, + new_MethodHandle( + self, + "Loop._read_from_self", + self._read_from_self, + None, + self)) + + self._listening_signals = True + + cdef _pause_signals(self): + if not self._is_main_thread(): + if self._listening_signals: + raise RuntimeError( + 'cannot pause signals handling; no longer running in ' + 'the main thread') + else: + return + + if not self._listening_signals: + raise RuntimeError('signals handling has not been setup') + + self._listening_signals = False + + _set_signal_wakeup_fd(self._old_signal_wakeup_id) + + self._remove_reader(self._ssock) + self._ssock.close() + self._csock.close() + self._ssock = None + self._csock = None + + cdef _shutdown_signals(self): + if not self._is_main_thread(): + if self._signal_handlers: + aio_logger.warning( + 'cannot cleanup signal handlers: closing the event loop ' + 'in a non-main OS thread') + return + + if self._listening_signals: + raise RuntimeError( + 'cannot shutdown signals handling as it has not been paused') + + if self._ssock: + raise RuntimeError( + 'self-pipe was not cleaned up after loop was run') + + for sig in list(self._signal_handlers): + self.remove_signal_handler(sig) + + def __sighandler(self, signum, frame): + self._signals.add(signum) + + cdef inline _ceval_process_signals(self): + # Invoke CPython eval loop to let process signals. + PyErr_CheckSignals() + # Calling a pure-Python function will invoke + # _PyEval_EvalFrameDefault which will process + # pending signal callbacks. + _noop.noop() # Might raise ^C + + cdef _read_from_self(self): + cdef bytes sigdata + sigdata = b'' + while True: + try: + data = self._ssock.recv(65536) + if not data: + break + sigdata += data + except InterruptedError: + continue + except BlockingIOError: + break + if sigdata: + self._invoke_signals(sigdata) + + cdef _invoke_signals(self, bytes data): + cdef set sigs + + self._ceval_process_signals() + + sigs = self._signals.copy() + self._signals.clear() + for signum in data: + if not signum: + # ignore null bytes written by set_wakeup_fd() + continue + sigs.discard(signum) + self._handle_signal(signum) + + for signum in sigs: + # Since not all signals are registered by add_signal_handler() + # (for instance, we use the default SIGINT handler) not all + # signals will trigger loop.__sighandler() callback. Therefore + # we combine two datasources: one is self-pipe, one is data + # from __sighandler; this ensures that signals shouldn't be + # lost even if set_wakeup_fd() couldn't write to the self-pipe. + self._handle_signal(signum) + + cdef _handle_signal(self, sig): + cdef Handle handle + + try: + handle = (self._signal_handlers[sig]) + except KeyError: + handle = None + + if handle is None: + self._ceval_process_signals() + return + + if handle._cancelled: + self.remove_signal_handler(sig) # Remove it properly. 
+ else: + self._append_ready_handle(handle) + self.handler_async.send() + + cdef _on_wake(self): + if ((self._ready_len > 0 or self._stopping) and + not self.handler_idle.running): + self.handler_idle.start() + + cdef _on_idle(self): + cdef: + int i, ntodo + object popleft = self._ready.popleft + Handle handler + + ntodo = len(self._ready) + if self._debug: + for i from 0 <= i < ntodo: + handler = popleft() + if handler._cancelled == 0: + try: + started = time_monotonic() + handler._run() + except BaseException as ex: + self._stop(ex) + return + else: + delta = time_monotonic() - started + if delta > self.slow_callback_duration: + aio_logger.warning( + 'Executing %s took %.3f seconds', + handler._format_handle(), delta) + + else: + for i from 0 <= i < ntodo: + handler = popleft() + if handler._cancelled == 0: + try: + handler._run() + except BaseException as ex: + self._stop(ex) + return + + if len(self._queued_streams): + self._exec_queued_writes() + + self._ready_len = len(self._ready) + if self._ready_len == 0 and self.handler_idle.running: + self.handler_idle.stop() + + if self._stopping: + uv.uv_stop(self.uvloop) # void + + cdef _stop(self, exc): + if exc is not None: + self._last_error = exc + if self._stopping == 1: + return + self._stopping = 1 + if not self.handler_idle.running: + self.handler_idle.start() + + cdef __run(self, uv.uv_run_mode mode): + # Although every UVHandle holds a reference to the loop, + # we want to do everything to ensure that the loop will + # never deallocate during the run -- so we do some + # manual refs management. + Py_INCREF(self) + with nogil: + err = uv.uv_run(self.uvloop, mode) + Py_DECREF(self) + + if err < 0: + raise convert_error(err) + + cdef _run(self, uv.uv_run_mode mode): + cdef int err + + if self._closed == 1: + raise RuntimeError('unable to start the loop; it was closed') + + if self._running == 1: + raise RuntimeError('this event loop is already running.') + + if (aio_get_running_loop is not None and + aio_get_running_loop() is not None): + raise RuntimeError( + 'Cannot run the event loop while another loop is running') + + # reset _last_error + self._last_error = None + + self._thread_id = PyThread_get_thread_ident() + self._running = 1 + + self.handler_check__exec_writes.start() + self.handler_idle.start() + + self._setup_or_resume_signals() + + if aio_set_running_loop is not None: + aio_set_running_loop(self) + try: + self.__run(mode) + finally: + if aio_set_running_loop is not None: + aio_set_running_loop(None) + + self.handler_check__exec_writes.stop() + self.handler_idle.stop() + + self._pause_signals() + + self._thread_id = 0 + self._running = 0 + self._stopping = 0 + + if self._last_error is not None: + # The loop was stopped with an error with 'loop._stop(error)' call + raise self._last_error + + cdef _close(self): + cdef int err + + if self._running == 1: + raise RuntimeError("Cannot close a running event loop") + + if self._closed == 1: + return + + self._closed = 1 + + for cb_handle in self._ready: + cb_handle.cancel() + self._ready.clear() + self._ready_len = 0 + + if self._polls: + for poll_handle in self._polls.values(): + (poll_handle)._close() + + self._polls.clear() + + if self._timers: + for timer_cbhandle in tuple(self._timers): + timer_cbhandle.cancel() + + # Close all remaining handles + self.handler_async._close() + self.handler_idle._close() + self.handler_check__exec_writes._close() + __close_all_handles(self) + self._shutdown_signals() + # During this run there should be no open handles, + # so it should finish 
right away + self.__run(uv.UV_RUN_DEFAULT) + + if self._fd_to_writer_fileobj: + for fileobj in self._fd_to_writer_fileobj.values(): + socket_dec_io_ref(fileobj) + self._fd_to_writer_fileobj.clear() + + if self._fd_to_reader_fileobj: + for fileobj in self._fd_to_reader_fileobj.values(): + socket_dec_io_ref(fileobj) + self._fd_to_reader_fileobj.clear() + + if self._timers: + raise RuntimeError( + f"new timers were queued during loop closing: {self._timers}") + + if self._polls: + raise RuntimeError( + f"new poll handles were queued during loop closing: " + f"{self._polls}") + + if self._ready: + raise RuntimeError( + f"new callbacks were queued during loop closing: " + f"{self._ready}") + + err = uv.uv_loop_close(self.uvloop) + if err < 0: + raise convert_error(err) + + self.handler_async = None + self.handler_idle = None + self.handler_check__exec_writes = None + + self._executor_shutdown_called = True + executor = self._default_executor + if executor is not None: + self._default_executor = None + executor.shutdown(wait=False) + + cdef uint64_t _time(self): + # asyncio doesn't have a time cache, neither should uvloop. + uv.uv_update_time(self.uvloop) # void + return uv.uv_now(self.uvloop) + + cdef inline _queue_write(self, UVStream stream): + self._queued_streams.add(stream) + if not self.handler_check__exec_writes.running: + self.handler_check__exec_writes.start() + + cdef _exec_queued_writes(self): + if len(self._queued_streams) == 0: + if self.handler_check__exec_writes.running: + self.handler_check__exec_writes.stop() + return + + cdef: + UVStream stream + + streams = self._queued_streams + self._queued_streams = self._executing_streams + self._executing_streams = streams + try: + for pystream in streams: + stream = pystream + stream._exec_write() + finally: + streams.clear() + + if self.handler_check__exec_writes.running: + if len(self._queued_streams) == 0: + self.handler_check__exec_writes.stop() + + cdef inline _call_soon(self, object callback, object args, object context): + cdef Handle handle + handle = new_Handle(self, callback, args, context) + self._call_soon_handle(handle) + return handle + + cdef inline _append_ready_handle(self, Handle handle): + self._check_closed() + self._ready.append(handle) + self._ready_len += 1 + + cdef inline _call_soon_handle(self, Handle handle): + self._append_ready_handle(handle) + if not self.handler_idle.running: + self.handler_idle.start() + + cdef _call_later(self, uint64_t delay, object callback, object args, + object context): + return TimerHandle(self, callback, args, delay, context) + + cdef void _handle_exception(self, object ex): + if isinstance(ex, Exception): + self.call_exception_handler({'exception': ex}) + else: + # BaseException + self._last_error = ex + # Exit ASAP + self._stop(None) + + cdef inline _check_signal(self, sig): + if not isinstance(sig, int): + raise TypeError('sig must be an int, not {!r}'.format(sig)) + + if not (1 <= sig < signal_NSIG): + raise ValueError( + 'sig {} out of range(1, {})'.format(sig, signal_NSIG)) + + cdef inline _check_closed(self): + if self._closed == 1: + raise RuntimeError('Event loop is closed') + + cdef inline _check_thread(self): + if self._thread_id == 0: + return + + cdef uint64_t thread_id + thread_id = PyThread_get_thread_ident() + + if thread_id != self._thread_id: + raise RuntimeError( + "Non-thread-safe operation invoked on an event loop other " + "than the current one") + + cdef inline _new_future(self): + return aio_Future(loop=self) + + cdef _track_transport(self, UVBaseTransport 
transport): + self._transports[transport._fileno()] = transport + + cdef _track_process(self, UVProcess proc): + self._processes.add(proc) + + cdef _untrack_process(self, UVProcess proc): + self._processes.discard(proc) + + cdef _fileobj_to_fd(self, fileobj): + """Return a file descriptor from a file object. + + Parameters: + fileobj -- file object or file descriptor + + Returns: + corresponding file descriptor + + Raises: + ValueError if the object is invalid + """ + # Copy of the `selectors._fileobj_to_fd()` function. + if isinstance(fileobj, int): + fd = fileobj + else: + try: + fd = int(fileobj.fileno()) + except (AttributeError, TypeError, ValueError): + raise ValueError("Invalid file object: " + "{!r}".format(fileobj)) from None + if fd < 0: + raise ValueError("Invalid file descriptor: {}".format(fd)) + return fd + + cdef _ensure_fd_no_transport(self, fd): + cdef UVBaseTransport tr + try: + tr = (self._transports[fd]) + except KeyError: + pass + else: + if tr._is_alive(): + raise RuntimeError( + 'File descriptor {!r} is used by transport {!r}'.format( + fd, tr)) + + cdef _add_reader(self, fileobj, Handle handle): + cdef: + UVPoll poll + + self._check_closed() + fd = self._fileobj_to_fd(fileobj) + self._ensure_fd_no_transport(fd) + + try: + poll = (self._polls[fd]) + except KeyError: + poll = UVPoll.new(self, fd) + self._polls[fd] = poll + + poll.start_reading(handle) + + old_fileobj = self._fd_to_reader_fileobj.pop(fd, None) + if old_fileobj is not None: + socket_dec_io_ref(old_fileobj) + + self._fd_to_reader_fileobj[fd] = fileobj + socket_inc_io_ref(fileobj) + + cdef _remove_reader(self, fileobj): + cdef: + UVPoll poll + + fd = self._fileobj_to_fd(fileobj) + self._ensure_fd_no_transport(fd) + + mapped_fileobj = self._fd_to_reader_fileobj.pop(fd, None) + if mapped_fileobj is not None: + socket_dec_io_ref(mapped_fileobj) + + if self._closed == 1: + return False + + try: + poll = (self._polls[fd]) + except KeyError: + return False + + result = poll.stop_reading() + if not poll.is_active(): + del self._polls[fd] + poll._close() + + return result + + cdef _has_reader(self, fileobj): + cdef: + UVPoll poll + + self._check_closed() + fd = self._fileobj_to_fd(fileobj) + + try: + poll = (self._polls[fd]) + except KeyError: + return False + + return poll.is_reading() + + cdef _add_writer(self, fileobj, Handle handle): + cdef: + UVPoll poll + + self._check_closed() + fd = self._fileobj_to_fd(fileobj) + self._ensure_fd_no_transport(fd) + + try: + poll = (self._polls[fd]) + except KeyError: + poll = UVPoll.new(self, fd) + self._polls[fd] = poll + + poll.start_writing(handle) + + old_fileobj = self._fd_to_writer_fileobj.pop(fd, None) + if old_fileobj is not None: + socket_dec_io_ref(old_fileobj) + + self._fd_to_writer_fileobj[fd] = fileobj + socket_inc_io_ref(fileobj) + + cdef _remove_writer(self, fileobj): + cdef: + UVPoll poll + + fd = self._fileobj_to_fd(fileobj) + self._ensure_fd_no_transport(fd) + + mapped_fileobj = self._fd_to_writer_fileobj.pop(fd, None) + if mapped_fileobj is not None: + socket_dec_io_ref(mapped_fileobj) + + if self._closed == 1: + return False + + try: + poll = (self._polls[fd]) + except KeyError: + return False + + result = poll.stop_writing() + if not poll.is_active(): + del self._polls[fd] + poll._close() + + return result + + cdef _has_writer(self, fileobj): + cdef: + UVPoll poll + + self._check_closed() + fd = self._fileobj_to_fd(fileobj) + + try: + poll = (self._polls[fd]) + except KeyError: + return False + + return poll.is_writing() + + cdef _getaddrinfo(self, 
object host, object port, + int family, int type, + int proto, int flags, + int unpack): + + if isinstance(port, str): + port = port.encode() + elif isinstance(port, int): + port = str(port).encode() + if port is not None and not isinstance(port, bytes): + raise TypeError('port must be a str, bytes or int') + + if isinstance(host, str): + host = host.encode('idna') + if host is not None: + if not isinstance(host, bytes): + raise TypeError('host must be a str or bytes') + + fut = self._new_future() + + def callback(result): + if AddrInfo.isinstance(result): + try: + if unpack == 0: + data = result + else: + data = (result).unpack() + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + if not fut.cancelled(): + fut.set_exception(ex) + else: + if not fut.cancelled(): + fut.set_result(data) + else: + if not fut.cancelled(): + fut.set_exception(result) + + AddrInfoRequest(self, host, port, family, type, proto, flags, callback) + return fut + + cdef _getnameinfo(self, system.sockaddr *addr, int flags): + cdef NameInfoRequest nr + fut = self._new_future() + + def callback(result): + if isinstance(result, tuple): + fut.set_result(result) + else: + fut.set_exception(result) + + nr = NameInfoRequest(self, callback) + nr.query(addr, flags) + return fut + + cdef _sock_recv(self, fut, sock, n): + if UVLOOP_DEBUG: + if fut.cancelled(): + # Shouldn't happen with _SyncSocketReaderFuture. + raise RuntimeError( + f'_sock_recv is called on a cancelled Future') + + if not self._has_reader(sock): + raise RuntimeError( + f'socket {sock!r} does not have a reader ' + f'in the _sock_recv callback') + + try: + data = sock.recv(n) + except (BlockingIOError, InterruptedError): + # No need to re-add the reader, let's just wait until + # the poll handler calls this callback again. + pass + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + fut.set_exception(exc) + self._remove_reader(sock) + else: + fut.set_result(data) + self._remove_reader(sock) + + cdef _sock_recv_into(self, fut, sock, buf): + if UVLOOP_DEBUG: + if fut.cancelled(): + # Shouldn't happen with _SyncSocketReaderFuture. + raise RuntimeError( + f'_sock_recv_into is called on a cancelled Future') + + if not self._has_reader(sock): + raise RuntimeError( + f'socket {sock!r} does not have a reader ' + f'in the _sock_recv_into callback') + + try: + data = sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + # No need to re-add the reader, let's just wait until + # the poll handler calls this callback again. + pass + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + fut.set_exception(exc) + self._remove_reader(sock) + else: + fut.set_result(data) + self._remove_reader(sock) + + cdef _sock_sendall(self, fut, sock, data): + cdef: + Handle handle + int n + + if UVLOOP_DEBUG: + if fut.cancelled(): + # Shouldn't happen with _SyncSocketWriterFuture. + raise RuntimeError( + f'_sock_sendall is called on a cancelled Future') + + if not self._has_writer(sock): + raise RuntimeError( + f'socket {sock!r} does not have a writer ' + f'in the _sock_sendall callback') + + try: + n = sock.send(data) + except (BlockingIOError, InterruptedError): + # Try next time. 
+ return + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + fut.set_exception(exc) + self._remove_writer(sock) + return + + self._remove_writer(sock) + + if n == len(data): + fut.set_result(None) + else: + if n: + if not isinstance(data, memoryview): + data = memoryview(data) + data = data[n:] + + handle = new_MethodHandle3( + self, + "Loop._sock_sendall", + self._sock_sendall, + None, + self, + fut, sock, data) + + self._add_writer(sock, handle) + + cdef _sock_accept(self, fut, sock): + try: + conn, address = sock.accept() + conn.setblocking(False) + except (BlockingIOError, InterruptedError): + # There is an active reader for _sock_accept, so + # do nothing, it will be called again. + pass + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + fut.set_exception(exc) + self._remove_reader(sock) + else: + fut.set_result((conn, address)) + self._remove_reader(sock) + + cdef _sock_connect(self, sock, address): + cdef: + Handle handle + + try: + sock.connect(address) + except (BlockingIOError, InterruptedError): + pass + else: + return + + fut = _SyncSocketWriterFuture(sock, self) + handle = new_MethodHandle3( + self, + "Loop._sock_connect", + self._sock_connect_cb, + None, + self, + fut, sock, address) + + self._add_writer(sock, handle) + return fut + + cdef _sock_connect_cb(self, fut, sock, address): + if UVLOOP_DEBUG: + if fut.cancelled(): + # Shouldn't happen with _SyncSocketWriterFuture. + raise RuntimeError( + f'_sock_connect_cb is called on a cancelled Future') + + if not self._has_writer(sock): + raise RuntimeError( + f'socket {sock!r} does not have a writer ' + f'in the _sock_connect_cb callback') + + try: + err = sock.getsockopt(uv.SOL_SOCKET, uv.SO_ERROR) + if err != 0: + # Jump to any except clause below. 
+ raise OSError(err, 'Connect call failed %s' % (address,)) + except (BlockingIOError, InterruptedError): + # socket is still registered, the callback will be retried later + pass + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + fut.set_exception(exc) + self._remove_writer(sock) + else: + fut.set_result(None) + self._remove_writer(sock) + + cdef _sock_set_reuseport(self, int fd): + cdef: + int err = 0 + int reuseport_flag = 1 + + err = system.setsockopt( + fd, + uv.SOL_SOCKET, + SO_REUSEPORT, + &reuseport_flag, + sizeof(reuseport_flag)) + + if err < 0: + raise convert_error(-errno.errno) + + cdef _set_coroutine_debug(self, bint enabled): + enabled = bool(enabled) + if self._coroutine_debug_set == enabled: + return + + if enabled: + self._coroutine_origin_tracking_saved_depth = ( + sys.get_coroutine_origin_tracking_depth()) + sys.set_coroutine_origin_tracking_depth( + DEBUG_STACK_DEPTH) + else: + sys.set_coroutine_origin_tracking_depth( + self._coroutine_origin_tracking_saved_depth) + + self._coroutine_debug_set = enabled + + def _get_backend_id(self): + """This method is used by uvloop tests and is not part of the API.""" + return uv.uv_backend_fd(self.uvloop) + + cdef _print_debug_info(self): + cdef: + int err + uv.uv_rusage_t rusage + + err = uv.uv_getrusage(&rusage) + if err < 0: + raise convert_error(err) + + # OS + + print('---- Process info: -----') + print('Process memory: {}'.format(rusage.ru_maxrss)) + print('Number of signals: {}'.format(rusage.ru_nsignals)) + print('') + + # Loop + + print('--- Loop debug info: ---') + print('Loop time: {}'.format(self.time())) + print('Errors logged: {}'.format( + self._debug_exception_handler_cnt)) + print() + print('Callback handles: {: <8} | {}'.format( + self._debug_cb_handles_count, + self._debug_cb_handles_total)) + print('Timer handles: {: <8} | {}'.format( + self._debug_cb_timer_handles_count, + self._debug_cb_timer_handles_total)) + print() + + print(' alive | closed |') + print('UVHandles python | libuv | total') + print(' objs | handles |') + print('-------------------------------+---------+---------') + for name in sorted(self._debug_handles_total): + print(' {: <18} {: >7} | {: >7} | {: >7}'.format( + name, + self._debug_handles_current[name], + self._debug_handles_closed[name], + self._debug_handles_total[name])) + print() + + print('uv_handle_t (current: {}; freed: {}; total: {})'.format( + self._debug_uv_handles_total - self._debug_uv_handles_freed, + self._debug_uv_handles_freed, + self._debug_uv_handles_total)) + print() + + print('--- Streams debug info: ---') + print('Write errors: {}'.format( + self._debug_stream_write_errors_total)) + print('Write without poll: {}'.format( + self._debug_stream_write_tries)) + print('Write contexts: {: <8} | {}'.format( + self._debug_stream_write_ctx_cnt, + self._debug_stream_write_ctx_total)) + print('Write failed callbacks: {}'.format( + self._debug_stream_write_cb_errors_total)) + print() + print('Read errors: {}'.format( + self._debug_stream_read_errors_total)) + print('Read callbacks: {}'.format( + self._debug_stream_read_cb_total)) + print('Read failed callbacks: {}'.format( + self._debug_stream_read_cb_errors_total)) + print('Read EOFs: {}'.format( + self._debug_stream_read_eof_total)) + print('Read EOF failed callbacks: {}'.format( + self._debug_stream_read_eof_cb_errors_total)) + print() + print('Listen errors: {}'.format( + self._debug_stream_listen_errors_total)) + print('Shutdown errors {}'.format( + self._debug_stream_shutdown_errors_total)) + 
print()
+
+        print('--- Polls debug info: ---')
+        print('Read events: {}'.format(
+            self._poll_read_events_total))
+        print('Read callbacks failed: {}'.format(
+            self._poll_read_cb_errors_total))
+        print('Write events: {}'.format(
+            self._poll_write_events_total))
+        print('Write callbacks failed: {}'.format(
+            self._poll_write_cb_errors_total))
+        print()
+
+        print('--- Sock ops successful on 1st try: ---')
+        print('Socket try-writes: {}'.format(
+            self._sock_try_write_total))
+
+        print(flush=True)
+
+    property print_debug_info:
+        def __get__(self):
+            if UVLOOP_DEBUG:
+                return lambda: self._print_debug_info()
+            else:
+                raise AttributeError('print_debug_info')
+
+    # Public API
+
+    def __repr__(self):
+        return '<{}.{} running={} closed={} debug={}>'.format(
+            self.__class__.__module__,
+            self.__class__.__name__,
+            self.is_running(),
+            self.is_closed(),
+            self.get_debug()
+        )
+
+    def call_soon(self, callback, *args, context=None):
+        """Arrange for a callback to be called as soon as possible.
+
+        This operates as a FIFO queue: callbacks are called in the
+        order in which they are registered. Each callback will be
+        called exactly once.
+
+        Any positional arguments after the callback will be passed to
+        the callback when it is called.
+        """
+        if self._debug == 1:
+            self._check_thread()
+        if args:
+            return self._call_soon(callback, args, context)
+        else:
+            return self._call_soon(callback, None, context)
+
+    def call_soon_threadsafe(self, callback, *args, context=None):
+        """Like call_soon(), but thread-safe."""
+        if not args:
+            args = None
+        cdef Handle handle = new_Handle(self, callback, args, context)
+        self._append_ready_handle(handle)  # deque append is atomic
+        # libuv async handler is thread-safe while the idle handler is not -
+        # we only set the async handler here, which will start the idle handler
+        # in _on_wake() from the loop and eventually call the callback.
+        self.handler_async.send()
+        return handle
+
+    def call_later(self, delay, callback, *args, context=None):
+        """Arrange for a callback to be called at a given time.
+
+        Return a Handle: an opaque object with a cancel() method that
+        can be used to cancel the call.
+
+        The delay can be an int or float, expressed in seconds. It is
+        always relative to the current time.
+
+        Each callback will be called exactly once. If two callbacks
+        are scheduled for exactly the same time, it is undefined which
+        will be called first.
+
+        Any positional arguments after the callback will be passed to
+        the callback when it is called.
+        """
+        cdef uint64_t when
+
+        self._check_closed()
+        if self._debug == 1:
+            self._check_thread()
+
+        if delay < 0:
+            delay = 0
+        elif delay == py_inf or delay > MAX_SLEEP:
+            # ~100 years sounds like a good approximation of
+            # infinity for a Python application.
+            delay = MAX_SLEEP
+
+        when = round(delay * 1000)
+        if not args:
+            args = None
+        if when == 0:
+            return self._call_soon(callback, args, context)
+        else:
+            return self._call_later(when, callback, args, context)
+
+    def call_at(self, when, callback, *args, context=None):
+        """Like call_later(), but uses an absolute time.
+
+        Absolute time corresponds to the event loop's time() method.
+        """
+        return self.call_later(
+            when - self.time(), callback, *args, context=context)
+
+    def time(self):
+        """Return the time according to the event loop's clock.
+
+        This is a float expressed in seconds since an epoch, but the
+        epoch, precision, accuracy and drift are unspecified and may
+        differ per event loop.
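+
+        For example (an illustrative sketch, not part of the API
+        contract), the loop clock is the natural basis for computing
+        absolute deadlines for call_at():
+
+            deadline = loop.time() + 5.0
+            loop.call_at(deadline, callback)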
+ """ + return self._time() / 1000 + + def stop(self): + """Stop running the event loop. + + Every callback already scheduled will still run. This simply informs + run_forever to stop looping after a complete iteration. + """ + self._call_soon_handle( + new_MethodHandle1( + self, + "Loop._stop", + self._stop, + None, + self, + None)) + + def run_forever(self): + """Run the event loop until stop() is called.""" + self._check_closed() + mode = uv.UV_RUN_DEFAULT + if self._stopping: + # loop.stop() was called right before loop.run_forever(). + # This is how asyncio loop behaves. + mode = uv.UV_RUN_NOWAIT + self._set_coroutine_debug(self._debug) + old_agen_hooks = sys.get_asyncgen_hooks() + sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook, + finalizer=self._asyncgen_finalizer_hook) + try: + self._run(mode) + finally: + self._set_coroutine_debug(False) + sys.set_asyncgen_hooks(*old_agen_hooks) + + def close(self): + """Close the event loop. + + The event loop must not be running. + + This is idempotent and irreversible. + + No other methods should be called after this one. + """ + self._close() + + def get_debug(self): + return bool(self._debug) + + def set_debug(self, enabled): + self._debug = bool(enabled) + if self.is_running(): + self.call_soon_threadsafe(self._set_coroutine_debug, self._debug) + + def is_running(self): + """Return whether the event loop is currently running.""" + return bool(self._running) + + def is_closed(self): + """Returns True if the event loop was closed.""" + return bool(self._closed) + + def create_future(self): + """Create a Future object attached to the loop.""" + return self._new_future() + + def create_task(self, coro, *, name=None, context=None): + """Schedule a coroutine object. + + Return a task object. + + If name is not None, task.set_name(name) will be called if the task + object has the set_name attribute, true for default Task in CPython. + + An optional keyword-only context argument allows specifying a custom + contextvars.Context for the coro to run in. The current context copy is + created when no context is provided. + """ + self._check_closed() + if PY311: + if self._task_factory is None: + task = aio_Task(coro, loop=self, context=context) + else: + task = self._task_factory(self, coro, context=context) + else: + if context is None: + if self._task_factory is None: + task = aio_Task(coro, loop=self) + else: + task = self._task_factory(self, coro) + else: + if self._task_factory is None: + task = context.run(aio_Task, coro, self) + else: + task = context.run(self._task_factory, self, coro) + + # copied from asyncio.tasks._set_task_name (bpo-34270) + if name is not None: + try: + set_name = task.set_name + except AttributeError: + pass + else: + set_name(name) + + return task + + def set_task_factory(self, factory): + """Set a task factory that will be used by loop.create_task(). + + If factory is None the default task factory will be set. + + If factory is a callable, it should have a signature matching + '(loop, coro)', where 'loop' will be a reference to the active + event loop, 'coro' will be a coroutine object. The callable + must return a Future. + """ + if factory is not None and not callable(factory): + raise TypeError('task factory must be a callable or None') + self._task_factory = factory + + def get_task_factory(self): + """Return a task factory, or None if the default one is in use.""" + return self._task_factory + + def run_until_complete(self, future): + """Run until the Future is done. 
+ + If the argument is a coroutine, it is wrapped in a Task. + + WARNING: It would be disastrous to call run_until_complete() + with the same coroutine twice -- it would wrap it in two + different Tasks and that can't be good. + + Return the Future's result, or raise its exception. + """ + self._check_closed() + + new_task = not isfuture(future) + future = aio_ensure_future(future, loop=self) + if new_task: + # An exception is raised if the future didn't complete, so there + # is no need to log the "destroy pending task" message + future._log_destroy_pending = False + + def done_cb(fut): + if not fut.cancelled(): + exc = fut.exception() + if isinstance(exc, (SystemExit, KeyboardInterrupt)): + # Issue #336: run_forever() already finished, + # no need to stop it. + return + self.stop() + + future.add_done_callback(done_cb) + try: + self.run_forever() + except BaseException: + if new_task and future.done() and not future.cancelled(): + # The coroutine raised a BaseException. Consume the exception + # to not log a warning, the caller doesn't have access to the + # local task. + future.exception() + raise + finally: + future.remove_done_callback(done_cb) + if not future.done(): + raise RuntimeError('Event loop stopped before Future completed.') + + return future.result() + + @cython.iterable_coroutine + async def getaddrinfo(self, object host, object port, *, + int family=0, int type=0, int proto=0, int flags=0): + + addr = __static_getaddrinfo_pyaddr(host, port, family, + type, proto, flags) + if addr is not None: + return [addr] + + return await self._getaddrinfo( + host, port, family, type, proto, flags, 1) + + @cython.iterable_coroutine + async def getnameinfo(self, sockaddr, int flags=0): + cdef: + AddrInfo ai_cnt + system.addrinfo *ai + system.sockaddr_in6 *sin6 + + if not isinstance(sockaddr, tuple): + raise TypeError('getnameinfo() argument 1 must be a tuple') + + sl = len(sockaddr) + + if sl < 2 or sl > 4: + raise ValueError('sockaddr must be a tuple of 2, 3 or 4 values') + + if sl > 2: + flowinfo = sockaddr[2] + if flowinfo < 0 or flowinfo > 0xfffff: + raise OverflowError( + 'getnameinfo(): flowinfo must be 0-1048575.') + else: + flowinfo = 0 + + if sl > 3: + scope_id = sockaddr[3] + if scope_id < 0 or scope_id > 2 ** 32: + raise OverflowError( + 'getsockaddrarg: scope_id must be unsigned 32 bit integer') + else: + scope_id = 0 + + ai_cnt = await self._getaddrinfo( + sockaddr[0], sockaddr[1], + uv.AF_UNSPEC, # family + uv.SOCK_DGRAM, # type + 0, # proto + uv.AI_NUMERICHOST, # flags + 0) # unpack + + ai = ai_cnt.data + + if ai.ai_next: + raise OSError("sockaddr resolved to multiple addresses") + + if ai.ai_family == uv.AF_INET: + if sl > 2: + raise OSError("IPv4 sockaddr must be 2 tuple") + elif ai.ai_family == uv.AF_INET6: + # Modify some fields in `ai` + sin6 = ai.ai_addr + sin6.sin6_flowinfo = system.htonl(flowinfo) + sin6.sin6_scope_id = scope_id + + return await self._getnameinfo(ai.ai_addr, flags) + + @cython.iterable_coroutine + async def start_tls(self, transport, protocol, sslcontext, *, + server_side=False, + server_hostname=None, + ssl_handshake_timeout=None, + ssl_shutdown_timeout=None): + """Upgrade transport to TLS. + + Return a new transport that *protocol* should start using + immediately. 
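+
+        A minimal illustrative sketch, assuming *transport* and
+        *protocol* came from an earlier create_connection() call and
+        that the stdlib ssl module is imported:
+
+            ctx = ssl.create_default_context()
+            new_transport = await loop.start_tls(
+                transport, protocol, ctx, server_hostname='example.com')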
+ """ + if not isinstance(sslcontext, ssl_SSLContext): + raise TypeError( + f'sslcontext is expected to be an instance of ssl.SSLContext, ' + f'got {sslcontext!r}') + + if isinstance(transport, (TCPTransport, UnixTransport)): + context = (transport).context + elif isinstance(transport, _SSLProtocolTransport): + context = (<_SSLProtocolTransport>transport).context + else: + raise TypeError( + f'transport {transport!r} is not supported by start_tls()') + + waiter = self._new_future() + ssl_protocol = SSLProtocol( + self, protocol, sslcontext, waiter, + server_side, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout, + ssl_shutdown_timeout=ssl_shutdown_timeout, + call_connection_made=False) + + # Pause early so that "ssl_protocol.data_received()" doesn't + # have a chance to get called before "ssl_protocol.connection_made()". + transport.pause_reading() + + transport.set_protocol(ssl_protocol) + conmade_cb = self.call_soon(ssl_protocol.connection_made, transport, + context=context) + # transport.resume_reading() will use the right context + # (transport.context) to call e.g. data_received() + resume_cb = self.call_soon(transport.resume_reading) + app_transport = ssl_protocol._get_app_transport(context) + + try: + await waiter + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + app_transport.close() + conmade_cb.cancel() + resume_cb.cancel() + raise + + return app_transport + + @cython.iterable_coroutine + async def create_server(self, protocol_factory, host=None, port=None, + *, + int family=uv.AF_UNSPEC, + int flags=uv.AI_PASSIVE, + sock=None, + backlog=100, + ssl=None, + reuse_address=None, + reuse_port=None, + ssl_handshake_timeout=None, + ssl_shutdown_timeout=None, + start_serving=True): + """A coroutine which creates a TCP server bound to host and port. + + The return value is a Server object which can be used to stop + the service. + + If host is an empty string or None all interfaces are assumed + and a list of multiple sockets will be returned (most likely + one for IPv4 and another one for IPv6). The host parameter can also be + a sequence (e.g. list) of hosts to bind to. + + family can be set to either AF_INET or AF_INET6 to force the + socket to use IPv4 or IPv6. If not set it will be determined + from host (defaults to AF_UNSPEC). + + flags is a bitmask for getaddrinfo(). + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows. + + ssl_handshake_timeout is the time in seconds that an SSL server + will wait for completion of the SSL handshake before aborting the + connection. Default is 60s. + + ssl_shutdown_timeout is the time in seconds that an SSL server + will wait for completion of the SSL shutdown before aborting the + connection. Default is 30s. 
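+
+        A minimal usage sketch, where EchoProtocol stands in for any
+        asyncio.Protocol factory:
+
+            server = await loop.create_server(
+                EchoProtocol, '127.0.0.1', 8080)
+            async with server:
+                await server.serve_forever()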
+ """ + cdef: + TCPServer tcp + system.addrinfo *addrinfo + Server server + + if sock is not None and sock.family == uv.AF_UNIX: + if host is not None or port is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + return await self.create_unix_server( + protocol_factory, sock=sock, backlog=backlog, ssl=ssl, + start_serving=start_serving, + # asyncio won't clean up socket file using create_server() API + cleanup_socket=False, + ) + + server = Server(self) + + if ssl is not None: + if not isinstance(ssl, ssl_SSLContext): + raise TypeError('ssl argument must be an SSLContext or None') + else: + if ssl_handshake_timeout is not None: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + if ssl_shutdown_timeout is not None: + raise ValueError( + 'ssl_shutdown_timeout is only meaningful with ssl') + + if host is not None or port is not None: + if sock is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + + if reuse_address is None: + reuse_address = os_name == 'posix' and sys_platform != 'cygwin' + reuse_port = bool(reuse_port) + if reuse_port and not has_SO_REUSEPORT: + raise ValueError( + 'reuse_port not supported by socket module') + + if host == '': + hosts = [None] + elif (isinstance(host, str) or not isinstance(host, col_Iterable)): + hosts = [host] + else: + hosts = host + + fs = [self._getaddrinfo(host, port, family, + uv.SOCK_STREAM, 0, flags, + 0) for host in hosts] + + infos = await aio_gather(*fs) + + completed = False + sock = None + try: + for info in infos: + addrinfo = (info).data + while addrinfo != NULL: + if addrinfo.ai_family == uv.AF_UNSPEC: + raise RuntimeError('AF_UNSPEC in DNS results') + + try: + sock = socket_socket(addrinfo.ai_family, + addrinfo.ai_socktype, + addrinfo.ai_protocol) + except socket_error: + # Assume it's a bad family/type/protocol + # combination. + if self._debug: + aio_logger.warning( + 'create_server() failed to create ' + 'socket.socket(%r, %r, %r)', + addrinfo.ai_family, + addrinfo.ai_socktype, + addrinfo.ai_protocol, exc_info=True) + addrinfo = addrinfo.ai_next + continue + + if reuse_address: + sock.setsockopt(uv.SOL_SOCKET, uv.SO_REUSEADDR, 1) + if reuse_port: + sock.setsockopt(uv.SOL_SOCKET, SO_REUSEPORT, 1) + # Disable IPv4/IPv6 dual stack support (enabled by + # default on Linux) which makes a single socket + # listen on both address families. 
+ if (addrinfo.ai_family == uv.AF_INET6 and + has_IPV6_V6ONLY): + sock.setsockopt(uv.IPPROTO_IPV6, IPV6_V6ONLY, 1) + + pyaddr = __convert_sockaddr_to_pyaddr(addrinfo.ai_addr) + try: + sock.bind(pyaddr) + except OSError as err: + raise OSError( + err.errno, 'error while attempting ' + 'to bind on address %r: %s' + % (pyaddr, err.strerror.lower())) from None + + tcp = TCPServer.new(self, protocol_factory, server, + uv.AF_UNSPEC, backlog, + ssl, ssl_handshake_timeout, + ssl_shutdown_timeout) + + try: + tcp._open(sock.fileno()) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + tcp._close() + raise + + server._add_server(tcp) + sock.detach() + sock = None + + addrinfo = addrinfo.ai_next + + completed = True + finally: + if not completed: + if sock is not None: + sock.close() + server.close() + else: + if sock is None: + raise ValueError('Neither host/port nor sock were specified') + if not _is_sock_stream(sock.type): + raise ValueError( + 'A Stream Socket was expected, got {!r}'.format(sock)) + + # libuv will set the socket to non-blocking mode, but + # we want Python socket object to notice that. + sock.setblocking(False) + + tcp = TCPServer.new(self, protocol_factory, server, + uv.AF_UNSPEC, backlog, + ssl, ssl_handshake_timeout, + ssl_shutdown_timeout) + + try: + tcp._open(sock.fileno()) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + tcp._close() + raise + + tcp._attach_fileobj(sock) + server._add_server(tcp) + + if start_serving: + server._start_serving() + + server._ref() + return server + + @cython.iterable_coroutine + async def create_connection(self, protocol_factory, host=None, port=None, + *, + ssl=None, + family=0, proto=0, flags=0, sock=None, + local_addr=None, server_hostname=None, + ssl_handshake_timeout=None, + ssl_shutdown_timeout=None): + """Connect to a TCP server. + + Create a streaming transport connection to a given Internet host and + port: socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_STREAM. protocol_factory must be + a callable returning a protocol instance. + + This method is a coroutine which will try to establish the connection + in the background. When successful, the coroutine returns a + (transport, protocol) pair. 
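+
+        A minimal usage sketch, where ClientProtocol stands in for any
+        asyncio.Protocol factory:
+
+            transport, protocol = await loop.create_connection(
+                ClientProtocol, 'example.com', 80)
+            transport.write(b'ping')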
+ """ + cdef: + AddrInfo ai_local = None + AddrInfo ai_remote + TCPTransport tr + + system.addrinfo *rai = NULL + system.addrinfo *lai = NULL + + system.addrinfo *rai_iter = NULL + system.addrinfo *lai_iter = NULL + + system.addrinfo rai_static + system.sockaddr_storage rai_addr_static + system.addrinfo lai_static + system.sockaddr_storage lai_addr_static + + object app_protocol + object app_transport + object protocol + object ssl_waiter + + if sock is not None and sock.family == uv.AF_UNIX: + if host is not None or port is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + return await self.create_unix_connection( + protocol_factory, None, + sock=sock, ssl=ssl, server_hostname=server_hostname) + + app_protocol = protocol = protocol_factory() + ssl_waiter = None + context = Context_CopyCurrent() + if ssl: + if server_hostname is None: + if not host: + raise ValueError('You must set server_hostname ' + 'when using ssl without a host') + server_hostname = host + + ssl_waiter = self._new_future() + sslcontext = None if isinstance(ssl, bool) else ssl + protocol = SSLProtocol( + self, app_protocol, sslcontext, ssl_waiter, + False, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout, + ssl_shutdown_timeout=ssl_shutdown_timeout) + else: + if server_hostname is not None: + raise ValueError('server_hostname is only meaningful with ssl') + if ssl_handshake_timeout is not None: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + if ssl_shutdown_timeout is not None: + raise ValueError( + 'ssl_shutdown_timeout is only meaningful with ssl') + + if host is not None or port is not None: + if sock is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + + fs = [] + f1 = f2 = None + + addr = __static_getaddrinfo( + host, port, family, uv.SOCK_STREAM, + proto, &rai_addr_static) + + if addr is None: + f1 = self._getaddrinfo( + host, port, family, + uv.SOCK_STREAM, proto, flags, + 0) # 0 == don't unpack + + fs.append(f1) + else: + rai_static.ai_addr = &rai_addr_static + rai_static.ai_next = NULL + rai = &rai_static + + if local_addr is not None: + if not isinstance(local_addr, (tuple, list)) or \ + len(local_addr) != 2: + raise ValueError( + 'local_addr must be a tuple of host and port') + + addr = __static_getaddrinfo( + local_addr[0], local_addr[1], + family, uv.SOCK_STREAM, + proto, &lai_addr_static) + if addr is None: + f2 = self._getaddrinfo( + local_addr[0], local_addr[1], family, + uv.SOCK_STREAM, proto, flags, + 0) # 0 == don't unpack + + fs.append(f2) + else: + lai_static.ai_addr = &lai_addr_static + lai_static.ai_next = NULL + lai = &lai_static + + if len(fs): + await aio_wait(fs) + + if rai is NULL: + ai_remote = f1.result() + if ai_remote.data is NULL: + raise OSError('getaddrinfo() returned empty list') + rai = ai_remote.data + + if lai is NULL and f2 is not None: + ai_local = f2.result() + if ai_local.data is NULL: + raise OSError( + 'getaddrinfo() returned empty list for local_addr') + lai = ai_local.data + + exceptions = [] + rai_iter = rai + while rai_iter is not NULL: + tr = None + try: + waiter = self._new_future() + tr = TCPTransport.new(self, protocol, None, waiter, + context) + + if lai is not NULL: + lai_iter = lai + while lai_iter is not NULL: + try: + tr.bind(lai_iter.ai_addr) + break + except OSError as exc: + exceptions.append(exc) + lai_iter = lai_iter.ai_next + else: + tr._close() + tr = None + + rai_iter = rai_iter.ai_next + continue + + 
tr.connect(rai_iter.ai_addr)
+                    await waiter
+
+                except OSError as exc:
+                    if tr is not None:
+                        tr._close()
+                        tr = None
+                    exceptions.append(exc)
+                except (KeyboardInterrupt, SystemExit):
+                    raise
+                except BaseException:
+                    if tr is not None:
+                        tr._close()
+                        tr = None
+                    raise
+                else:
+                    break
+
+                rai_iter = rai_iter.ai_next
+
+            else:
+                # If they all have the same str(), raise one.
+                model = str(exceptions[0])
+                if all(str(exc) == model for exc in exceptions):
+                    raise exceptions[0]
+                # Raise a combined exception so the user can see all
+                # the various error messages.
+                raise OSError('Multiple exceptions: {}'.format(
+                    ', '.join(str(exc) for exc in exceptions)))
+        else:
+            if sock is None:
+                raise ValueError(
+                    'host and port was not specified and no sock specified')
+            if not _is_sock_stream(sock.type):
+                raise ValueError(
+                    'A Stream Socket was expected, got {!r}'.format(sock))
+
+            # libuv will set the socket to non-blocking mode, but
+            # we want Python socket object to notice that.
+            sock.setblocking(False)
+
+            waiter = self._new_future()
+            tr = TCPTransport.new(self, protocol, None, waiter, context)
+            try:
+                # libuv will make socket non-blocking
+                tr._open(sock.fileno())
+                tr._init_protocol()
+                await waiter
+            except (KeyboardInterrupt, SystemExit):
+                raise
+            except BaseException:
+                # It's OK to call `_close()` here, as opposed to
+                # `_force_close()` or `close()` as we want to terminate the
+                # transport immediately. The `waiter` can only be woken
+                # up in `Transport._call_connection_made()`, and calling
+                # `_close()` before it is fine.
+                tr._close()
+                raise
+
+            tr._attach_fileobj(sock)
+
+        if ssl:
+            app_transport = protocol._get_app_transport(context)
+            try:
+                await ssl_waiter
+            except (KeyboardInterrupt, SystemExit):
+                raise
+            except BaseException:
+                app_transport.close()
+                raise
+            return app_transport, app_protocol
+        else:
+            return tr, protocol
+
+    @cython.iterable_coroutine
+    async def create_unix_server(self, protocol_factory, path=None,
+                                 *, backlog=100, sock=None, ssl=None,
+                                 ssl_handshake_timeout=None,
+                                 ssl_shutdown_timeout=None,
+                                 start_serving=True, cleanup_socket=PY313):
+        """A coroutine which creates a UNIX Domain Socket server.
+
+        The return value is a Server object, which can be used to stop
+        the service.
+
+        path is a str, representing a file system path to bind the
+        server socket to.
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+
+        backlog is the maximum number of queued connections passed to
+        listen() (defaults to 100).
+
+        ssl can be set to an SSLContext to enable SSL over the
+        accepted connections.
+
+        ssl_handshake_timeout is the time in seconds that an SSL server
+        will wait for completion of the SSL handshake before aborting the
+        connection. Default is 60s.
+
+        ssl_shutdown_timeout is the time in seconds that an SSL server
+        will wait for completion of the SSL shutdown before aborting the
+        connection. Default is 30s.
+
+        If *cleanup_socket* is true then the Unix socket will automatically
+        be removed from the filesystem when the server is closed, unless the
+        socket has been replaced after the server has been created.
+        This defaults to True on Python 3.13 and above, or False otherwise.
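+
+        A minimal usage sketch (the protocol factory and socket path
+        are illustrative):
+
+            server = await loop.create_unix_server(
+                EchoProtocol, path='/tmp/app.sock')
+            async with server:
+                await server.serve_forever()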
+ """ + cdef: + UnixServer pipe + Server server = Server(self) + + if ssl is not None: + if not isinstance(ssl, ssl_SSLContext): + raise TypeError('ssl argument must be an SSLContext or None') + else: + if ssl_handshake_timeout is not None: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + if ssl_shutdown_timeout is not None: + raise ValueError( + 'ssl_shutdown_timeout is only meaningful with ssl') + + if path is not None: + if sock is not None: + raise ValueError( + 'path and sock can not be specified at the same time') + orig_path = path + + path = os_fspath(path) + + if isinstance(path, str): + path = PyUnicode_EncodeFSDefault(path) + + # Check for abstract socket. + if path[0] != 0: + try: + if stat_S_ISSOCK(os_stat(path).st_mode): + os_remove(path) + except FileNotFoundError: + pass + except OSError as err: + # Directory may have permissions only to create socket. + aio_logger.error( + 'Unable to check or remove stale UNIX socket %r: %r', + orig_path, err) + + # We use Python sockets to create a UNIX server socket because + # when UNIX sockets are created by libuv, libuv removes the path + # they were bound to. This is different from asyncio, which + # doesn't cleanup the socket path. + sock = socket_socket(uv.AF_UNIX) + + try: + sock.bind(path) + except OSError as exc: + sock.close() + if exc.errno == errno.EADDRINUSE: + # Let's improve the error message by adding + # with what exact address it occurs. + msg = 'Address {!r} is already in use'.format(orig_path) + raise OSError(errno.EADDRINUSE, msg) from None + else: + raise + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + sock.close() + raise + + else: + if sock is None: + raise ValueError( + 'path was not specified, and no sock specified') + + if sock.family != uv.AF_UNIX or not _is_sock_stream(sock.type): + raise ValueError( + 'A UNIX Domain Stream Socket was expected, got {!r}' + .format(sock)) + + # libuv will set the socket to non-blocking mode, but + # we want Python socket object to notice that. + sock.setblocking(False) + + if cleanup_socket: + path = sock.getsockname() + # Check for abstract socket. `str` and `bytes` paths are supported. 
+ if path[0] not in (0, '\x00'): + try: + self._unix_server_sockets[sock] = os_stat(path).st_ino + except FileNotFoundError: + pass + + pipe = UnixServer.new( + self, protocol_factory, server, backlog, + ssl, ssl_handshake_timeout, ssl_shutdown_timeout) + + try: + pipe._open(sock.fileno()) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + pipe._close() + sock.close() + raise + + pipe._attach_fileobj(sock) + server._add_server(pipe) + + if start_serving: + server._start_serving() + + return server + + @cython.iterable_coroutine + async def create_unix_connection(self, protocol_factory, path=None, *, + ssl=None, sock=None, + server_hostname=None, + ssl_handshake_timeout=None, + ssl_shutdown_timeout=None): + + cdef: + UnixTransport tr + object app_protocol + object app_transport + object protocol + object ssl_waiter + + app_protocol = protocol = protocol_factory() + ssl_waiter = None + context = Context_CopyCurrent() + if ssl: + if server_hostname is None: + raise ValueError('You must set server_hostname ' + 'when using ssl without a host') + + ssl_waiter = self._new_future() + sslcontext = None if isinstance(ssl, bool) else ssl + protocol = SSLProtocol( + self, app_protocol, sslcontext, ssl_waiter, + False, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout, + ssl_shutdown_timeout=ssl_shutdown_timeout) + else: + if server_hostname is not None: + raise ValueError('server_hostname is only meaningful with ssl') + if ssl_handshake_timeout is not None: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + if ssl_shutdown_timeout is not None: + raise ValueError( + 'ssl_shutdown_timeout is only meaningful with ssl') + + if path is not None: + if sock is not None: + raise ValueError( + 'path and sock can not be specified at the same time') + + path = os_fspath(path) + + if isinstance(path, str): + path = PyUnicode_EncodeFSDefault(path) + + waiter = self._new_future() + tr = UnixTransport.new(self, protocol, None, waiter, context) + tr.connect(path) + try: + await waiter + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + tr._close() + raise + + else: + if sock is None: + raise ValueError('no path and sock were specified') + + if sock.family != uv.AF_UNIX or not _is_sock_stream(sock.type): + raise ValueError( + 'A UNIX Domain Stream Socket was expected, got {!r}' + .format(sock)) + + # libuv will set the socket to non-blocking mode, but + # we want Python socket object to notice that. + sock.setblocking(False) + + waiter = self._new_future() + tr = UnixTransport.new(self, protocol, None, waiter, context) + try: + tr._open(sock.fileno()) + tr._init_protocol() + await waiter + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + tr._close() + raise + + tr._attach_fileobj(sock) + + if ssl: + app_transport = protocol._get_app_transport(Context_CopyCurrent()) + try: + await ssl_waiter + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + app_transport.close() + raise + return app_transport, app_protocol + else: + return tr, protocol + + def default_exception_handler(self, context): + """Default exception handler. + + This is called when an exception occurs and no exception + handler is set, and can be called by a custom exception + handler that wants to defer to the default behavior. + + The context parameter has the same meaning as in + `call_exception_handler()`. 
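+
+        A sketch of a custom handler deferring to this default, where
+        record_failure() is a hypothetical application helper:
+
+            def handler(loop, context):
+                record_failure(context.get('message'))
+                loop.default_exception_handler(context)
+
+            loop.set_exception_handler(handler)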
+ """ + message = context.get('message') + if not message: + message = 'Unhandled exception in event loop' + + exception = context.get('exception') + if exception is not None: + exc_info = (type(exception), exception, exception.__traceback__) + else: + exc_info = False + + log_lines = [message] + for key in sorted(context): + if key in {'message', 'exception'}: + continue + value = context[key] + if key == 'source_traceback': + tb = ''.join(tb_format_list(value)) + value = 'Object created at (most recent call last):\n' + value += tb.rstrip() + else: + try: + value = repr(value) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + value = ('Exception in __repr__ {!r}; ' + 'value type: {!r}'.format(ex, type(value))) + log_lines.append('{}: {}'.format(key, value)) + + aio_logger.error('\n'.join(log_lines), exc_info=exc_info) + + def get_exception_handler(self): + """Return an exception handler, or None if the default one is in use. + """ + return self._exception_handler + + def set_exception_handler(self, handler): + """Set handler as the new event loop exception handler. + + If handler is None, the default exception handler will + be set. + + If handler is a callable object, it should have a + signature matching '(loop, context)', where 'loop' + will be a reference to the active event loop, 'context' + will be a dict object (see `call_exception_handler()` + documentation for details about context). + """ + if handler is not None and not callable(handler): + raise TypeError('A callable object or None is expected, ' + 'got {!r}'.format(handler)) + self._exception_handler = handler + + def call_exception_handler(self, context): + """Call the current event loop's exception handler. + + The context argument is a dict containing the following keys: + + - 'message': Error message; + - 'exception' (optional): Exception object; + - 'future' (optional): Future instance; + - 'handle' (optional): Handle instance; + - 'protocol' (optional): Protocol instance; + - 'transport' (optional): Transport instance; + - 'socket' (optional): Socket instance. + + New keys maybe introduced in the future. + + Note: do not overload this method in an event loop subclass. + For custom exception handling, use the + `set_exception_handler()` method. + """ + if UVLOOP_DEBUG: + self._debug_exception_handler_cnt += 1 + + if self._exception_handler is None: + try: + self.default_exception_handler(context) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + # Second protection layer for unexpected errors + # in the default implementation, as well as for subclassed + # event loops with overloaded "default_exception_handler". + aio_logger.error('Exception in default exception handler', + exc_info=True) + else: + try: + self._exception_handler(self, context) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + # Exception in the user set custom exception handler. + try: + # Let's try default handler. + self.default_exception_handler({ + 'message': 'Unhandled error in exception handler', + 'exception': exc, + 'context': context, + }) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + # Guard 'default_exception_handler' in case it is + # overloaded. 
aio_logger.error('Exception in default exception handler '
+                                     'while handling an unexpected error '
+                                     'in custom exception handler',
+                                     exc_info=True)
+
+    def add_reader(self, fileobj, callback, *args):
+        """Add a reader callback."""
+        if len(args) == 0:
+            args = None
+        self._add_reader(fileobj, new_Handle(self, callback, args, None))
+
+    def remove_reader(self, fileobj):
+        """Remove a reader callback."""
+        self._remove_reader(fileobj)
+
+    def add_writer(self, fileobj, callback, *args):
+        """Add a writer callback."""
+        if len(args) == 0:
+            args = None
+        self._add_writer(fileobj, new_Handle(self, callback, args, None))
+
+    def remove_writer(self, fileobj):
+        """Remove a writer callback."""
+        self._remove_writer(fileobj)
+
+    @cython.iterable_coroutine
+    async def sock_recv(self, sock, n):
+        """Receive data from the socket.
+
+        The return value is a bytes object representing the data received.
+        The maximum amount of data to be received at once is specified by
+        *n*.
+
+        This method is a coroutine.
+        """
+        cdef:
+            Handle handle
+
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+
+        fut = _SyncSocketReaderFuture(sock, self)
+        handle = new_MethodHandle3(
+            self,
+            "Loop._sock_recv",
+            self._sock_recv,
+            None,
+            self,
+            fut, sock, n)
+
+        self._add_reader(sock, handle)
+        return await fut
+
+    @cython.iterable_coroutine
+    async def sock_recv_into(self, sock, buf):
+        """Receive data from the socket.
+
+        The received data is written into *buf* (a writable buffer).
+        The return value is the number of bytes written.
+
+        This method is a coroutine.
+        """
+        cdef:
+            Handle handle
+
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+
+        fut = _SyncSocketReaderFuture(sock, self)
+        handle = new_MethodHandle3(
+            self,
+            "Loop._sock_recv_into",
+            self._sock_recv_into,
+            None,
+            self,
+            fut, sock, buf)
+
+        self._add_reader(sock, handle)
+        return await fut
+
+    @cython.iterable_coroutine
+    async def sock_sendall(self, sock, data):
+        """Send data to the socket.
+
+        The socket must be connected to a remote socket. This method continues
+        to send data from *data* until either all data has been sent or an
+        error occurs. None is returned on success. On error, an exception is
+        raised, and there is no way to determine how much data, if any, was
+        successfully processed by the receiving end of the connection.
+
+        This method is a coroutine.
+        """
+        cdef:
+            Handle handle
+            ssize_t n
+
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+
+        if not data:
+            return
+
+        socket_inc_io_ref(sock)
+        try:
+            try:
+                n = sock.send(data)
+            except (BlockingIOError, InterruptedError):
+                pass
+            else:
+                if UVLOOP_DEBUG:
+                    # This can be a partial success, i.e. only part
+                    # of the data was sent
+                    self._sock_try_write_total += 1
+
+                if n == len(data):
+                    return
+                if not isinstance(data, memoryview):
+                    data = memoryview(data)
+                data = data[n:]
+
+            fut = _SyncSocketWriterFuture(sock, self)
+            handle = new_MethodHandle3(
+                self,
+                "Loop._sock_sendall",
+                self._sock_sendall,
+                None,
+                self,
+                fut, sock, data)
+
+            self._add_writer(sock, handle)
+            return await fut
+        finally:
+            socket_dec_io_ref(sock)
+
+    @cython.iterable_coroutine
+    async def sock_accept(self, sock):
+        """Accept a connection.
+
+        The socket must be bound to an address and listening for connections.
+ The return value is a pair (conn, address) where conn is a new socket + object usable to send and receive data on the connection, and address + is the address bound to the socket on the other end of the connection. + + This method is a coroutine. + """ + cdef: + Handle handle + + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + + fut = _SyncSocketReaderFuture(sock, self) + handle = new_MethodHandle2( + self, + "Loop._sock_accept", + self._sock_accept, + None, + self, + fut, sock) + + self._add_reader(sock, handle) + return await fut + + @cython.iterable_coroutine + async def sock_connect(self, sock, address): + """Connect to a remote socket at address. + + This method is a coroutine. + """ + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + + socket_inc_io_ref(sock) + try: + if sock.family == uv.AF_UNIX: + fut = self._sock_connect(sock, address) + else: + addrs = await self.getaddrinfo( + *address[:2], family=sock.family) + + _, _, _, _, address = addrs[0] + fut = self._sock_connect(sock, address) + if fut is not None: + await fut + finally: + socket_dec_io_ref(sock) + + @cython.iterable_coroutine + async def sock_recvfrom(self, sock, bufsize): + raise NotImplementedError + + @cython.iterable_coroutine + async def sock_recvfrom_into(self, sock, buf, nbytes=0): + raise NotImplementedError + + @cython.iterable_coroutine + async def sock_sendto(self, sock, data, address): + raise NotImplementedError + + @cython.iterable_coroutine + async def connect_accepted_socket(self, protocol_factory, sock, *, + ssl=None, + ssl_handshake_timeout=None, + ssl_shutdown_timeout=None): + """Handle an accepted connection. + + This is used by servers that accept connections outside of + asyncio but that use asyncio to handle connections. + + This method is a coroutine. When completed, the coroutine + returns a (transport, protocol) pair. 
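+
+        A minimal sketch for a connection accepted outside of asyncio
+        (ServerProtocol and listening_sock are illustrative):
+
+            conn, _ = listening_sock.accept()
+            transport, protocol = await loop.connect_accepted_socket(
+                ServerProtocol, conn)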
+ """ + + cdef: + UVStream transport = None + + if ssl is not None: + if not isinstance(ssl, ssl_SSLContext): + raise TypeError('ssl argument must be an SSLContext or None') + else: + if ssl_handshake_timeout is not None: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + if ssl_shutdown_timeout is not None: + raise ValueError( + 'ssl_shutdown_timeout is only meaningful with ssl') + + if not _is_sock_stream(sock.type): + raise ValueError( + 'A Stream Socket was expected, got {!r}'.format(sock)) + + app_protocol = protocol_factory() + waiter = self._new_future() + transport_waiter = None + context = Context_CopyCurrent() + + if ssl is None: + protocol = app_protocol + transport_waiter = waiter + else: + protocol = SSLProtocol( + self, app_protocol, ssl, waiter, + server_side=True, + server_hostname=None, + ssl_handshake_timeout=ssl_handshake_timeout, + ssl_shutdown_timeout=ssl_shutdown_timeout) + transport_waiter = None + + if sock.family == uv.AF_UNIX: + transport = UnixTransport.new( + self, protocol, None, transport_waiter, context) + elif sock.family in (uv.AF_INET, uv.AF_INET6): + transport = TCPTransport.new( + self, protocol, None, transport_waiter, context) + + if transport is None: + raise ValueError( + 'invalid socket family, expected AF_UNIX, AF_INET or AF_INET6') + + transport._open(sock.fileno()) + transport._init_protocol() + transport._attach_fileobj(sock) + + if ssl: + app_transport = protocol._get_app_transport(context) + try: + await waiter + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + app_transport.close() + raise + return app_transport, protocol + else: + try: + await waiter + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + transport._close() + raise + return transport, protocol + + def run_in_executor(self, executor, func, *args): + if aio_iscoroutine(func) or aio_iscoroutinefunction(func): + raise TypeError("coroutines cannot be used with run_in_executor()") + + self._check_closed() + + if executor is None: + executor = self._default_executor + # Only check when the default executor is being used + self._check_default_executor() + if executor is None: + executor = cc_ThreadPoolExecutor() + self._default_executor = executor + + return aio_wrap_future(executor.submit(func, *args), loop=self) + + def set_default_executor(self, executor): + self._default_executor = executor + + @cython.iterable_coroutine + async def __subprocess_run(self, protocol_factory, args, + stdin=subprocess_PIPE, + stdout=subprocess_PIPE, + stderr=subprocess_PIPE, + universal_newlines=False, + shell=True, + bufsize=0, + preexec_fn=None, + close_fds=None, + cwd=None, + env=None, + startupinfo=None, + creationflags=0, + restore_signals=True, + start_new_session=False, + executable=None, + pass_fds=(), + **kwargs): + + # TODO: Implement close_fds (might not be very important in + # Python 3.5, since all FDs aren't inheritable by default.) + + cdef: + int debug_flags = 0 + + if universal_newlines: + raise ValueError("universal_newlines must be False") + if bufsize != 0: + raise ValueError("bufsize must be 0") + if startupinfo is not None: + raise ValueError('startupinfo is not supported') + if creationflags != 0: + raise ValueError('creationflags is not supported') + + if executable is not None: + args[0] = executable + + # For tests only! Do not use in your code. Ever. 
+ if kwargs.pop("__uvloop_sleep_after_fork", False): + debug_flags |= __PROCESS_DEBUG_SLEEP_AFTER_FORK + if kwargs: + raise ValueError( + 'unexpected kwargs: {}'.format(', '.join(kwargs.keys()))) + + waiter = self._new_future() + protocol = protocol_factory() + proc = UVProcessTransport.new(self, protocol, + args, env, cwd, start_new_session, + stdin, stdout, stderr, pass_fds, + waiter, + debug_flags, + preexec_fn, + restore_signals) + + try: + await waiter + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + proc.close() + raise + + return proc, protocol + + @cython.iterable_coroutine + async def subprocess_shell(self, protocol_factory, cmd, *, + shell=True, + **kwargs): + + if not shell: + raise ValueError("shell must be True") + + args = [cmd] + if shell: + args = [b'/bin/sh', b'-c'] + args + + return await self.__subprocess_run(protocol_factory, args, shell=True, + **kwargs) + + @cython.iterable_coroutine + async def subprocess_exec(self, protocol_factory, program, *args, + shell=False, **kwargs): + + if shell: + raise ValueError("shell must be False") + + args = list((program,) + args) + + return await self.__subprocess_run(protocol_factory, args, shell=False, + **kwargs) + + @cython.iterable_coroutine + async def connect_read_pipe(self, proto_factory, pipe): + """Register read pipe in event loop. Set the pipe to non-blocking mode. + + protocol_factory should instantiate object with Protocol interface. + pipe is a file-like object. + Return pair (transport, protocol), where transport supports the + ReadTransport interface.""" + cdef: + ReadUnixTransport transp + + waiter = self._new_future() + proto = proto_factory() + transp = ReadUnixTransport.new(self, proto, None, waiter) + transp._add_extra_info('pipe', pipe) + try: + transp._open(pipe.fileno()) + transp._init_protocol() + await waiter + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + transp._close() + raise + transp._attach_fileobj(pipe) + return transp, proto + + @cython.iterable_coroutine + async def connect_write_pipe(self, proto_factory, pipe): + """Register write pipe in event loop. + + protocol_factory should instantiate object with BaseProtocol interface. + Pipe is file-like object already switched to nonblocking. + Return pair (transport, protocol), where transport support + WriteTransport interface.""" + cdef: + WriteUnixTransport transp + + waiter = self._new_future() + proto = proto_factory() + transp = WriteUnixTransport.new(self, proto, None, waiter) + transp._add_extra_info('pipe', pipe) + try: + transp._open(pipe.fileno()) + transp._init_protocol() + await waiter + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + transp._close() + raise + transp._attach_fileobj(pipe) + return transp, proto + + def add_signal_handler(self, sig, callback, *args): + """Add a handler for a signal. UNIX only. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. + """ + cdef: + Handle h + + if not self._is_main_thread(): + raise ValueError( + 'add_signal_handler() can only be called from ' + 'the main thread') + + if (aio_iscoroutine(callback) + or aio_iscoroutinefunction(callback)): + raise TypeError( + "coroutines cannot be used with add_signal_handler()") + + if sig == uv.SIGCHLD: + if (hasattr(callback, '__self__') and + isinstance(callback.__self__, aio_AbstractChildWatcher)): + + warnings_warn( + "!!! 
asyncio is trying to install its ChildWatcher for " + "SIGCHLD signal !!!\n\nThis is probably because a uvloop " + "instance is used with asyncio.set_event_loop(). " + "The correct way to use uvloop is to install its policy: " + "`asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())`" + "\n\n", RuntimeWarning, source=self) + + # TODO: ideally we should always raise an error here, + # but that would be a backwards incompatible change, + # because we recommended using "asyncio.set_event_loop()" + # in our README. Need to start a deprecation period + # at some point to turn this warning into an error. + return + + raise RuntimeError( + 'cannot add a signal handler for SIGCHLD: it is used ' + 'by the event loop to track subprocesses') + + self._check_signal(sig) + self._check_closed() + + h = new_Handle(self, callback, args or None, None) + self._signal_handlers[sig] = h + + try: + # Register a dummy signal handler to ask Python to write the signal + # number in the wakeup file descriptor. + signal_signal(sig, self.__sighandler) + + # Set SA_RESTART to limit EINTR occurrences. + signal_siginterrupt(sig, False) + except OSError as exc: + del self._signal_handlers[sig] + if not self._signal_handlers: + try: + signal_set_wakeup_fd(-1) + except (ValueError, OSError) as nexc: + aio_logger.info('set_wakeup_fd(-1) failed: %s', nexc) + + if exc.errno == errno_EINVAL: + raise RuntimeError('sig {} cannot be caught'.format(sig)) + else: + raise + + def remove_signal_handler(self, sig): + """Remove a handler for a signal. UNIX only. + + Return True if a signal handler was removed, False if not. + """ + + if not self._is_main_thread(): + raise ValueError( + 'remove_signal_handler() can only be called from ' + 'the main thread') + + self._check_signal(sig) + + if not self._listening_signals: + return False + + try: + del self._signal_handlers[sig] + except KeyError: + return False + + if sig == uv.SIGINT: + handler = signal_default_int_handler + else: + handler = signal_SIG_DFL + + try: + signal_signal(sig, handler) + except OSError as exc: + if exc.errno == errno_EINVAL: + raise RuntimeError('sig {} cannot be caught'.format(sig)) + else: + raise + + return True + + @cython.iterable_coroutine + async def create_datagram_endpoint(self, protocol_factory, + local_addr=None, remote_addr=None, *, + family=0, proto=0, flags=0, + reuse_address=_unset, reuse_port=None, + allow_broadcast=None, sock=None): + """A coroutine which creates a datagram endpoint. + + This method will try to establish the endpoint in the background. + When successful, the coroutine returns a (transport, protocol) pair. + + protocol_factory must be a callable returning a protocol instance. + + socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_DGRAM. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows and some UNIX's. If the + :py:data:`~socket.SO_REUSEPORT` constant is not defined then this + capability is unsupported. + + allow_broadcast tells the kernel to allow this endpoint to send + messages to the broadcast address. + + sock can optionally be specified in order to use a preexisting + socket object. 
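+
+        A minimal usage sketch, where UDPEchoProtocol stands in for any
+        asyncio.DatagramProtocol factory:
+
+            transport, protocol = await loop.create_datagram_endpoint(
+                UDPEchoProtocol, local_addr=('127.0.0.1', 9999))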
+ """ + cdef: + UDPTransport udp = None + system.addrinfo * lai + system.addrinfo * rai + + if sock is not None: + if not _is_sock_dgram(sock.type): + raise ValueError( + 'A UDP Socket was expected, got {!r}'.format(sock)) + if (local_addr or remote_addr or + family or proto or flags or + reuse_port or allow_broadcast): + # show the problematic kwargs in exception msg + opts = dict(local_addr=local_addr, remote_addr=remote_addr, + family=family, proto=proto, flags=flags, + reuse_address=reuse_address, reuse_port=reuse_port, + allow_broadcast=allow_broadcast) + problems = ', '.join( + '{}={}'.format(k, v) for k, v in opts.items() if v) + raise ValueError( + 'socket modifier keyword arguments can not be used ' + 'when sock is specified. ({})'.format(problems)) + sock.setblocking(False) + udp = UDPTransport.__new__(UDPTransport) + udp._init(self, uv.AF_UNSPEC) + udp.open(sock.family, sock.fileno()) + udp._attach_fileobj(sock) + else: + if reuse_address is not _unset: + if reuse_address: + raise ValueError("Passing `reuse_address=True` is no " + "longer supported, as the usage of " + "SO_REUSEPORT in UDP poses a significant " + "security concern.") + else: + warnings_warn("The *reuse_address* parameter has been " + "deprecated as of 0.15.", DeprecationWarning, + stacklevel=2) + reuse_port = bool(reuse_port) + if reuse_port and not has_SO_REUSEPORT: + raise ValueError( + 'reuse_port not supported by socket module') + + lads = None + if local_addr is not None: + if (not isinstance(local_addr, (tuple, list)) or + len(local_addr) != 2): + raise TypeError( + 'local_addr must be a tuple of (host, port)') + lads = await self._getaddrinfo( + local_addr[0], local_addr[1], + family, uv.SOCK_DGRAM, proto, flags, + 0) + + rads = None + if remote_addr is not None: + if (not isinstance(remote_addr, (tuple, list)) or + len(remote_addr) != 2): + raise TypeError( + 'remote_addr must be a tuple of (host, port)') + rads = await self._getaddrinfo( + remote_addr[0], remote_addr[1], + family, uv.SOCK_DGRAM, proto, flags, + 0) + + excs = [] + if lads is None: + if rads is not None: + udp = UDPTransport.__new__(UDPTransport) + rai = (rads).data + udp._init(self, rai.ai_family) + udp._connect(rai.ai_addr, rai.ai_addrlen) + udp._set_address(rai) + else: + if family not in (uv.AF_INET, uv.AF_INET6): + raise ValueError('unexpected address family') + udp = UDPTransport.__new__(UDPTransport) + udp._init(self, family) + + if reuse_port: + self._sock_set_reuseport(udp._fileno()) + + else: + lai = (lads).data + while lai is not NULL: + try: + udp = UDPTransport.__new__(UDPTransport) + udp._init(self, lai.ai_family) + if reuse_port: + self._sock_set_reuseport(udp._fileno()) + udp._bind(lai.ai_addr) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + lai = lai.ai_next + excs.append(ex) + continue + else: + break + else: + ctx = None + if len(excs): + ctx = excs[0] + raise OSError('could not bind to local_addr {}'.format( + local_addr)) from ctx + + if rads is not None: + rai = (rads).data + while rai is not NULL: + if rai.ai_family != lai.ai_family: + rai = rai.ai_next + continue + if rai.ai_protocol != lai.ai_protocol: + rai = rai.ai_next + continue + udp._connect(rai.ai_addr, rai.ai_addrlen) + udp._set_address(rai) + break + else: + raise OSError( + 'could not bind to remote_addr {}'.format( + remote_addr)) + + if allow_broadcast: + udp._set_broadcast(1) + + protocol = protocol_factory() + waiter = self._new_future() + assert udp is not None + udp._set_protocol(protocol) + udp._set_waiter(waiter) 
+ udp._init_protocol() + + await waiter + return udp, protocol + + def _monitor_fs(self, path: str, callback) -> asyncio.Handle: + cdef: + UVFSEvent fs_handle + char* c_str_path + + self._check_closed() + fs_handle = UVFSEvent.new(self, callback, None) + p_bytes = path.encode('UTF-8') + c_str_path = p_bytes + flags = 0 + fs_handle.start(c_str_path, flags) + return fs_handle + + def _check_default_executor(self): + if self._executor_shutdown_called: + raise RuntimeError('Executor shutdown has been called') + + def _asyncgen_finalizer_hook(self, agen): + self._asyncgens.discard(agen) + if not self.is_closed(): + self.call_soon_threadsafe(self.create_task, agen.aclose()) + + def _asyncgen_firstiter_hook(self, agen): + if self._asyncgens_shutdown_called: + warnings_warn( + "asynchronous generator {!r} was scheduled after " + "loop.shutdown_asyncgens() call".format(agen), + ResourceWarning, source=self) + + self._asyncgens.add(agen) + + @cython.iterable_coroutine + async def shutdown_asyncgens(self): + """Shutdown all active asynchronous generators.""" + self._asyncgens_shutdown_called = True + + if not len(self._asyncgens): + return + + closing_agens = list(self._asyncgens) + self._asyncgens.clear() + + shutdown_coro = aio_gather( + *[ag.aclose() for ag in closing_agens], + return_exceptions=True) + + results = await shutdown_coro + for result, agen in zip(results, closing_agens): + if isinstance(result, Exception): + self.call_exception_handler({ + 'message': 'an error occurred during closing of ' + 'asynchronous generator {!r}'.format(agen), + 'exception': result, + 'asyncgen': agen + }) + + @cython.iterable_coroutine + async def shutdown_default_executor(self, timeout=None): + """Schedule the shutdown of the default executor. + + The timeout parameter specifies the amount of time the executor will + be given to finish joining. The default value is None, which means + that the executor will be given an unlimited amount of time. 
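+
+        An illustrative teardown sequence:
+
+            loop.run_until_complete(loop.shutdown_asyncgens())
+            loop.run_until_complete(loop.shutdown_default_executor())
+            loop.close()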
+        """
+        self._executor_shutdown_called = True
+        if self._default_executor is None:
+            return
+        future = self.create_future()
+        thread = threading_Thread(target=self._do_shutdown, args=(future,))
+        thread.start()
+        try:
+            await future
+        finally:
+            thread.join(timeout)
+
+        if thread.is_alive():
+            warnings_warn(
+                "The executor did not finish joining "
+                f"its threads within {timeout} seconds.",
+                RuntimeWarning,
+                stacklevel=2
+            )
+            self._default_executor.shutdown(wait=False)
+
+    def _do_shutdown(self, future):
+        try:
+            self._default_executor.shutdown(wait=True)
+            self.call_soon_threadsafe(future.set_result, None)
+        except Exception as ex:
+            self.call_soon_threadsafe(future.set_exception, ex)
+
+
+# Expose pointer for integration with other C-extensions
+def libuv_get_loop_t_ptr(loop):
+    return PyCapsule_New((loop).uvloop, NULL, NULL)
+
+
+def libuv_get_version():
+    return uv.uv_version()
+
+
+def _testhelper_unwrap_capsuled_pointer(obj):
+    return PyCapsule_GetPointer(obj, NULL)
+
+
+cdef void __loop_alloc_buffer(
+    uv.uv_handle_t* uvhandle,
+    size_t suggested_size,
+    uv.uv_buf_t* buf
+) noexcept with gil:
+    cdef:
+        Loop loop = (uvhandle.data)._loop
+
+    if loop._recv_buffer_in_use == 1:
+        buf.len = 0
+        exc = RuntimeError('concurrent allocations')
+        loop._handle_exception(exc)
+        return
+
+    loop._recv_buffer_in_use = 1
+    buf.base = loop._recv_buffer
+    buf.len = sizeof(loop._recv_buffer)
+
+
+cdef inline void __loop_free_buffer(Loop loop):
+    loop._recv_buffer_in_use = 0
+
+
+class _SyncSocketReaderFuture(aio_Future):
+
+    def __init__(self, sock, loop):
+        aio_Future.__init__(self, loop=loop)
+        self.__sock = sock
+        self.__loop = loop
+
+    def __remove_reader(self):
+        if self.__sock is not None and self.__sock.fileno() != -1:
+            self.__loop.remove_reader(self.__sock)
+            self.__sock = None
+
+    if PY39:
+        def cancel(self, msg=None):
+            self.__remove_reader()
+            aio_Future.cancel(self, msg=msg)
+
+    else:
+        def cancel(self):
+            self.__remove_reader()
+            aio_Future.cancel(self)
+
+
+class _SyncSocketWriterFuture(aio_Future):
+
+    def __init__(self, sock, loop):
+        aio_Future.__init__(self, loop=loop)
+        self.__sock = sock
+        self.__loop = loop
+
+    def __remove_writer(self):
+        if self.__sock is not None and self.__sock.fileno() != -1:
+            self.__loop.remove_writer(self.__sock)
+            self.__sock = None
+
+    if PY39:
+        def cancel(self, msg=None):
+            self.__remove_writer()
+            aio_Future.cancel(self, msg=msg)
+
+    else:
+        def cancel(self):
+            self.__remove_writer()
+            aio_Future.cancel(self)
+
+
+include "cbhandles.pyx"
+include "pseudosock.pyx"
+include "lru.pyx"
+
+include "handles/handle.pyx"
+include "handles/async_.pyx"
+include "handles/idle.pyx"
+include "handles/check.pyx"
+include "handles/timer.pyx"
+include "handles/poll.pyx"
+include "handles/basetransport.pyx"
+include "handles/stream.pyx"
+include "handles/streamserver.pyx"
+include "handles/tcp.pyx"
+include "handles/pipe.pyx"
+include "handles/process.pyx"
+include "handles/fsevent.pyx"
+
+include "request.pyx"
+include "dns.pyx"
+include "sslproto.pyx"
+
+include "handles/udp.pyx"
+
+include "server.pyx"
+
+
+# Used in UVProcess
+cdef vint __atfork_installed = 0
+cdef vint __forking = 0
+cdef Loop __forking_loop = None
+
+
+cdef void __get_fork_handler() noexcept nogil:
+    with gil:
+        if (__forking and __forking_loop is not None and
+                __forking_loop.active_process_handler is not None):
+            __forking_loop.active_process_handler._after_fork()
+
+cdef __install_atfork():
+    global __atfork_installed
+
+    if __atfork_installed:
+        return
+    __atfork_installed = 1
+
+    cdef int err
int err + + err = system.pthread_atfork(NULL, NULL, &system.handleAtFork) + if err: + __atfork_installed = 0 + raise convert_error(-err) + + +# Install PyMem* memory allocators +cdef vint __mem_installed = 0 +cdef __install_pymem(): + global __mem_installed + if __mem_installed: + return + __mem_installed = 1 + + cdef int err + err = uv.uv_replace_allocator(PyMem_RawMalloc, + PyMem_RawRealloc, + PyMem_RawCalloc, + PyMem_RawFree) + if err < 0: + __mem_installed = 0 + raise convert_error(err) + + +cdef _set_signal_wakeup_fd(fd): + if fd >= 0: + return signal_set_wakeup_fd(fd, warn_on_full_buffer=False) + else: + return signal_set_wakeup_fd(fd) + + +# Helpers for tests + +@cython.iterable_coroutine +async def _test_coroutine_1(): + return 42 diff --git a/hackaton/lib/python3.12/site-packages/uvloop/lru.pyx b/hackaton/lib/python3.12/site-packages/uvloop/lru.pyx new file mode 100644 index 0000000..cc7ea1d --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/lru.pyx @@ -0,0 +1,79 @@ +cdef object _LRU_MARKER = object() + + +@cython.final +cdef class LruCache: + + cdef: + object _dict + int _maxsize + object _dict_move_to_end + object _dict_get + + # We use an OrderedDict for LRU implementation. Operations: + # + # * We use a simple `__setitem__` to push a new entry: + # `entries[key] = new_entry` + # That will push `new_entry` to the *end* of the entries dict. + # + # * When we have a cache hit, we call + # `entries.move_to_end(key, last=True)` + # to move the entry to the *end* of the entries dict. + # + # * When we need to remove entries to maintain `max_size`, we call + # `entries.popitem(last=False)` + # to remove an entry from the *beginning* of the entries dict. + # + # So new entries and hits are always promoted to the end of the + # entries dict, whereas the unused one will group in the + # beginning of it. 
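+ #
+ # Illustrative sketch (not part of uvloop): the same discipline in
+ # plain Python with collections.OrderedDict; the `cache` name below
+ # is a placeholder:
+ #
+ #     from collections import OrderedDict
+ #     cache = OrderedDict()
+ #     cache['key'] = 'value'                # push: lands at the *end*
+ #     cache.move_to_end('key', last=True)   # hit: promote to the *end*
+ #     cache.popitem(last=False)             # evict from the *beginning*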
+ + def __init__(self, *, maxsize): + if maxsize <= 0: + raise ValueError( + f'maxsize is expected to be greater than 0, got {maxsize}') + + self._dict = col_OrderedDict() + self._dict_move_to_end = self._dict.move_to_end + self._dict_get = self._dict.get + self._maxsize = maxsize + + cdef get(self, key, default): + o = self._dict_get(key, _LRU_MARKER) + if o is _LRU_MARKER: + return default + self._dict_move_to_end(key) # last=True + return o + + cdef inline needs_cleanup(self): + return len(self._dict) > self._maxsize + + cdef inline cleanup_one(self): + k, _ = self._dict.popitem(last=False) + return k + + def __getitem__(self, key): + o = self._dict[key] + self._dict_move_to_end(key) # last=True + return o + + def __setitem__(self, key, o): + if key in self._dict: + self._dict[key] = o + self._dict_move_to_end(key) # last=True + else: + self._dict[key] = o + while self.needs_cleanup(): + self.cleanup_one() + + def __delitem__(self, key): + del self._dict[key] + + def __contains__(self, key): + return key in self._dict + + def __len__(self): + return len(self._dict) + + def __iter__(self): + return iter(self._dict) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/pseudosock.pyx b/hackaton/lib/python3.12/site-packages/uvloop/pseudosock.pyx new file mode 100644 index 0000000..10a1ad6 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/pseudosock.pyx @@ -0,0 +1,209 @@ +cdef class PseudoSocket: + cdef: + int _family + int _type + int _proto + int _fd + object _peername + object _sockname + + def __init__(self, int family, int type, int proto, int fd): + self._family = family + self._type = type + self._proto = proto + self._fd = fd + self._peername = None + self._sockname = None + + cdef _na(self, what): + raise TypeError('transport sockets do not support {}'.format(what)) + + cdef _make_sock(self): + return socket_socket(self._family, self._type, self._proto, self._fd) + + property family: + def __get__(self): + try: + return socket_AddressFamily(self._family) + except ValueError: + return self._family + + property type: + def __get__(self): + try: + return socket_SocketKind(self._type) + except ValueError: + return self._type + + property proto: + def __get__(self): + return self._proto + + def __repr__(self): + s = ("self.request.data is not self: + raise RuntimeError( + '{}.cancel: .request.data is not UVRequest'.format( + self.__class__.__name__)) + + # We only can cancel pending requests. Let's try. + err = uv.uv_cancel(self.request) + if err < 0: + if err == uv.UV_EBUSY: + # Can't close the request -- it's executing (see the first + # comment). Loop will have to wait until the callback + # fires. + pass + elif err == uv.UV_EINVAL: + # From libuv docs: + # + # Only cancellation of uv_fs_t, uv_getaddrinfo_t, + # uv_getnameinfo_t and uv_work_t requests is currently + # supported. 
+ return + else: + ex = convert_error(err) + self.loop._handle_exception(ex) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/server.pxd b/hackaton/lib/python3.12/site-packages/uvloop/server.pxd new file mode 100644 index 0000000..ef10f81 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/server.pxd @@ -0,0 +1,19 @@ +cdef class Server: + cdef: + list _servers + list _waiters + int _active_count + Loop _loop + bint _serving + object _serving_forever_fut + object __weakref__ + + cdef _add_server(self, UVStreamServer srv) + cdef _start_serving(self) + cdef _wakeup(self) + + cdef _attach(self) + cdef _detach(self) + + cdef _ref(self) + cdef _unref(self) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/server.pyx b/hackaton/lib/python3.12/site-packages/uvloop/server.pyx new file mode 100644 index 0000000..845bcfd --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/server.pyx @@ -0,0 +1,136 @@ +import asyncio + + +cdef class Server: + def __cinit__(self, Loop loop): + self._loop = loop + self._servers = [] + self._waiters = [] + self._active_count = 0 + self._serving_forever_fut = None + + cdef _add_server(self, UVStreamServer srv): + self._servers.append(srv) + + cdef _start_serving(self): + if self._serving: + return + + self._serving = 1 + for server in self._servers: + (<UVStreamServer>server).listen() + + cdef _wakeup(self): + cdef list waiters + + waiters = self._waiters + self._waiters = None + for waiter in waiters: + if not waiter.done(): + waiter.set_result(waiter) + + cdef _attach(self): + assert self._servers is not None + self._active_count += 1 + + cdef _detach(self): + assert self._active_count > 0 + self._active_count -= 1 + if self._active_count == 0 and self._servers is None: + self._wakeup() + + cdef _ref(self): + # Keep the server object alive while it's not explicitly closed. + self._loop._servers.add(self) + + cdef _unref(self): + self._loop._servers.discard(self) + + # Public API + + @cython.iterable_coroutine + async def __aenter__(self): + return self + + @cython.iterable_coroutine + async def __aexit__(self, *exc): + self.close() + await self.wait_closed() + + def __repr__(self): + return '<%s sockets=%r>' % (self.__class__.__name__, self.sockets) + + def get_loop(self): + return self._loop + + @cython.iterable_coroutine + async def wait_closed(self): + # Do not remove `self._servers is None` below + # because close() method only closes server sockets + # and existing client connections are left open.
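+ #
+ # Illustrative sketch (not uvloop code; host/port are placeholders):
+ # the resulting contract as seen from plain asyncio:
+ #
+ #     server = await asyncio.start_server(handler, '127.0.0.1', 8888)
+ #     server.close()              # stops accepting new connections only
+ #     await server.wait_closed()  # existing clients may still be open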
+ if self._servers is None or self._waiters is None: + return + waiter = self._loop._new_future() + self._waiters.append(waiter) + await waiter + + def close(self): + cdef list servers + + if self._servers is None: + return + + try: + servers = self._servers + self._servers = None + self._serving = 0 + + for server in servers: + (<UVStreamServer>server)._close() + + if self._active_count == 0: + self._wakeup() + finally: + self._unref() + + def is_serving(self): + return self._serving + + @cython.iterable_coroutine + async def start_serving(self): + self._start_serving() + + @cython.iterable_coroutine + async def serve_forever(self): + if self._serving_forever_fut is not None: + raise RuntimeError( + f'server {self!r} is already being awaited on serve_forever()') + if self._servers is None: + raise RuntimeError(f'server {self!r} is closed') + + self._start_serving() + self._serving_forever_fut = self._loop.create_future() + + try: + await self._serving_forever_fut + except asyncio.CancelledError: + try: + self.close() + await self.wait_closed() + finally: + raise + finally: + self._serving_forever_fut = None + + property sockets: + def __get__(self): + cdef list sockets = [] + + # Guard against `self._servers is None` + if self._servers: + for server in self._servers: + sockets.append( + (<UVStreamServer>server)._get_socket() + ) + + return sockets diff --git a/hackaton/lib/python3.12/site-packages/uvloop/sslproto.pxd b/hackaton/lib/python3.12/site-packages/uvloop/sslproto.pxd new file mode 100644 index 0000000..3da10f0 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/sslproto.pxd @@ -0,0 +1,138 @@ +cdef enum SSLProtocolState: + UNWRAPPED = 0 + DO_HANDSHAKE = 1 + WRAPPED = 2 + FLUSHING = 3 + SHUTDOWN = 4 + + +cdef enum AppProtocolState: + # This tracks the state of app protocol (https://git.io/fj59P): + # + # INIT -cm-> CON_MADE [-dr*->] [-er-> EOF?]
-cl-> CON_LOST + # + # * cm: connection_made() + # * dr: data_received() + # * er: eof_received() + # * cl: connection_lost() + + STATE_INIT = 0 + STATE_CON_MADE = 1 + STATE_EOF = 2 + STATE_CON_LOST = 3 + + +cdef class _SSLProtocolTransport: + cdef: + Loop _loop + SSLProtocol _ssl_protocol + bint _closed + object context + + +cdef class SSLProtocol: + cdef: + bint _server_side + str _server_hostname + object _sslcontext + + object _extra + + object _write_backlog + size_t _write_buffer_size + + object _waiter + Loop _loop + _SSLProtocolTransport _app_transport + bint _app_transport_created + + object _transport + object _ssl_handshake_timeout + object _ssl_shutdown_timeout + + object _sslobj + object _sslobj_read + object _sslobj_write + object _incoming + object _incoming_write + object _outgoing + object _outgoing_read + char* _ssl_buffer + size_t _ssl_buffer_len + object _ssl_buffer_view + SSLProtocolState _state + size_t _conn_lost + AppProtocolState _app_state + + bint _ssl_writing_paused + bint _app_reading_paused + + size_t _incoming_high_water + size_t _incoming_low_water + bint _ssl_reading_paused + + bint _app_writing_paused + size_t _outgoing_high_water + size_t _outgoing_low_water + + object _app_protocol + bint _app_protocol_is_buffer + object _app_protocol_get_buffer + object _app_protocol_buffer_updated + + object _handshake_start_time + object _handshake_timeout_handle + object _shutdown_timeout_handle + + cdef _set_app_protocol(self, app_protocol) + cdef _wakeup_waiter(self, exc=*) + cdef _get_extra_info(self, name, default=*) + cdef _set_state(self, SSLProtocolState new_state) + + # Handshake flow + + cdef _start_handshake(self) + cdef _check_handshake_timeout(self) + cdef _do_handshake(self) + cdef _on_handshake_complete(self, handshake_exc) + + # Shutdown flow + + cdef _start_shutdown(self, object context=*) + cdef _check_shutdown_timeout(self) + cdef _do_read_into_void(self, object context) + cdef _do_flush(self, object context=*) + cdef _do_shutdown(self, object context=*) + cdef _on_shutdown_complete(self, shutdown_exc) + cdef _abort(self, exc) + + # Outgoing flow + + cdef _write_appdata(self, list_of_data, object context) + cdef _do_write(self) + cdef _process_outgoing(self) + + # Incoming flow + + cdef _do_read(self) + cdef _do_read__buffered(self) + cdef _do_read__copied(self) + cdef _call_eof_received(self, object context=*) + + # Flow control for writes from APP socket + + cdef _control_app_writing(self, object context=*) + cdef size_t _get_write_buffer_size(self) + cdef _set_write_buffer_limits(self, high=*, low=*) + + # Flow control for reads to APP socket + + cdef _pause_reading(self) + cdef _resume_reading(self, object context) + + # Flow control for reads from SSL socket + + cdef _control_ssl_reading(self) + cdef _set_read_buffer_limits(self, high=*, low=*) + cdef size_t _get_read_buffer_size(self) + cdef _fatal_error(self, exc, message=*) diff --git a/hackaton/lib/python3.12/site-packages/uvloop/sslproto.pyx b/hackaton/lib/python3.12/site-packages/uvloop/sslproto.pyx new file mode 100644 index 0000000..42bb764 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/uvloop/sslproto.pyx @@ -0,0 +1,950 @@ +cdef _create_transport_context(server_side, server_hostname): + if server_side: + raise ValueError('Server side SSL needs a valid SSLContext') + + # Client side may pass ssl=True to use a default + # context; in that case the sslcontext passed is None. + # The default is secure for client connections. + # Python 3.4+: use up-to-date strong settings. 
+ sslcontext = ssl_create_default_context() + if not server_hostname: + sslcontext.check_hostname = False + return sslcontext + + +cdef class _SSLProtocolTransport: + + # TODO: + # _sendfile_compatible = constants._SendfileMode.FALLBACK + + def __cinit__(self, Loop loop, ssl_protocol, context): + self._loop = loop + # SSLProtocol instance + self._ssl_protocol = ssl_protocol + self._closed = False + if context is None: + context = Context_CopyCurrent() + self.context = context + + def get_extra_info(self, name, default=None): + """Get optional transport information.""" + return self._ssl_protocol._get_extra_info(name, default) + + def set_protocol(self, protocol): + self._ssl_protocol._set_app_protocol(protocol) + + def get_protocol(self): + return self._ssl_protocol._app_protocol + + def is_closing(self): + return self._closed + + def close(self): + """Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) be called + with None as its argument. + """ + self._closed = True + self._ssl_protocol._start_shutdown(self.context.copy()) + + def __dealloc__(self): + if not self._closed: + self._closed = True + warnings_warn( + "unclosed transport ", ResourceWarning) + + def is_reading(self): + return not self._ssl_protocol._app_reading_paused + + def pause_reading(self): + """Pause the receiving end. + + No data will be passed to the protocol's data_received() + method until resume_reading() is called. + """ + self._ssl_protocol._pause_reading() + + def resume_reading(self): + """Resume the receiving end. + + Data received will once again be passed to the protocol's + data_received() method. + """ + self._ssl_protocol._resume_reading(self.context.copy()) + + def set_write_buffer_limits(self, high=None, low=None): + """Set the high- and low-water limits for write flow control. + + These two values control when to call the protocol's + pause_writing() and resume_writing() methods. If specified, + the low-water limit must be less than or equal to the + high-water limit. Neither value can be negative. + + The defaults are implementation-specific. If only the + high-water limit is given, the low-water limit defaults to an + implementation-specific value less than or equal to the + high-water limit. Setting high to zero forces low to zero as + well, and causes pause_writing() to be called whenever the + buffer becomes non-empty. Setting low to zero causes + resume_writing() to be called only once the buffer is empty. + Use of zero for either limit is generally sub-optimal as it + reduces opportunities for doing I/O and computation + concurrently. + """ + self._ssl_protocol._set_write_buffer_limits(high, low) + self._ssl_protocol._control_app_writing(self.context.copy()) + + def get_write_buffer_limits(self): + return (self._ssl_protocol._outgoing_low_water, + self._ssl_protocol._outgoing_high_water) + + def get_write_buffer_size(self): + """Return the current size of the write buffers.""" + return self._ssl_protocol._get_write_buffer_size() + + def set_read_buffer_limits(self, high=None, low=None): + """Set the high- and low-water limits for read flow control. + + These two values control when to call the upstream transport's + pause_reading() and resume_reading() methods. If specified, + the low-water limit must be less than or equal to the + high-water limit. Neither value can be negative. + + The defaults are implementation-specific.
If only the + high-water limit is given, the low-water limit defaults to an + implementation-specific value less than or equal to the + high-water limit. Setting high to zero forces low to zero as + well, and causes pause_reading() to be called whenever the + buffer becomes non-empty. Setting low to zero causes + resume_reading() to be called only once the buffer is empty. + Use of zero for either limit is generally sub-optimal as it + reduces opportunities for doing I/O and computation + concurrently. + """ + self._ssl_protocol._set_read_buffer_limits(high, low) + self._ssl_protocol._control_ssl_reading() + + def get_read_buffer_limits(self): + return (self._ssl_protocol._incoming_low_water, + self._ssl_protocol._incoming_high_water) + + def get_read_buffer_size(self): + """Return the current size of the read buffer.""" + return self._ssl_protocol._get_read_buffer_size() + + @property + def _protocol_paused(self): + # Required for sendfile fallback pause_writing/resume_writing logic + return self._ssl_protocol._app_writing_paused + + def write(self, data): + """Write some data bytes to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + """ + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError(f"data: expecting a bytes-like instance, " + f"got {type(data).__name__}") + if not data: + return + self._ssl_protocol._write_appdata((data,), self.context.copy()) + + def writelines(self, list_of_data): + """Write a list (or any iterable) of data bytes to the transport. + + The default implementation concatenates the arguments and + calls write() on the result. + """ + self._ssl_protocol._write_appdata(list_of_data, self.context.copy()) + + def write_eof(self): + """Close the write end after flushing buffered data. + + This raises :exc:`NotImplementedError` right now. + """ + raise NotImplementedError + + def can_write_eof(self): + """Return True if this transport supports write_eof(), False if not.""" + return False + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + self._force_close(None) + + def _force_close(self, exc): + self._closed = True + self._ssl_protocol._abort(exc) + + def _test__append_write_backlog(self, data): + # for test only + self._ssl_protocol._write_backlog.append(data) + self._ssl_protocol._write_buffer_size += len(data) + + +cdef class SSLProtocol: + """SSL protocol. + + Implementation of SSL on top of a socket using incoming and outgoing + buffers which are ssl.MemoryBIO objects. 
+ """ + + def __cinit__(self, *args, **kwargs): + self._ssl_buffer_len = SSL_READ_MAX_SIZE + self._ssl_buffer = PyMem_RawMalloc(self._ssl_buffer_len) + if not self._ssl_buffer: + raise MemoryError() + self._ssl_buffer_view = PyMemoryView_FromMemory( + self._ssl_buffer, self._ssl_buffer_len, PyBUF_WRITE) + + def __dealloc__(self): + self._ssl_buffer_view = None + PyMem_RawFree(self._ssl_buffer) + self._ssl_buffer = NULL + self._ssl_buffer_len = 0 + + def __init__(self, loop, app_protocol, sslcontext, waiter, + server_side=False, server_hostname=None, + call_connection_made=True, + ssl_handshake_timeout=None, + ssl_shutdown_timeout=None): + if ssl_handshake_timeout is None: + ssl_handshake_timeout = SSL_HANDSHAKE_TIMEOUT + elif ssl_handshake_timeout <= 0: + raise ValueError( + f"ssl_handshake_timeout should be a positive number, " + f"got {ssl_handshake_timeout}") + if ssl_shutdown_timeout is None: + ssl_shutdown_timeout = SSL_SHUTDOWN_TIMEOUT + elif ssl_shutdown_timeout <= 0: + raise ValueError( + f"ssl_shutdown_timeout should be a positive number, " + f"got {ssl_shutdown_timeout}") + + if not sslcontext: + sslcontext = _create_transport_context( + server_side, server_hostname) + + self._server_side = server_side + if server_hostname and not server_side: + self._server_hostname = server_hostname + else: + self._server_hostname = None + self._sslcontext = sslcontext + # SSL-specific extra info. More info are set when the handshake + # completes. + self._extra = dict(sslcontext=sslcontext) + + # App data write buffering + self._write_backlog = col_deque() + self._write_buffer_size = 0 + + self._waiter = waiter + self._loop = loop + self._set_app_protocol(app_protocol) + self._app_transport = None + self._app_transport_created = False + # transport, ex: SelectorSocketTransport + self._transport = None + self._ssl_handshake_timeout = ssl_handshake_timeout + self._ssl_shutdown_timeout = ssl_shutdown_timeout + # SSL and state machine + self._sslobj = None + self._incoming = ssl_MemoryBIO() + self._incoming_write = self._incoming.write + self._outgoing = ssl_MemoryBIO() + self._outgoing_read = self._outgoing.read + self._state = UNWRAPPED + self._conn_lost = 0 # Set when connection_lost called + if call_connection_made: + self._app_state = STATE_INIT + else: + self._app_state = STATE_CON_MADE + + # Flow Control + + self._ssl_writing_paused = False + + self._app_reading_paused = False + + self._ssl_reading_paused = False + self._incoming_high_water = 0 + self._incoming_low_water = 0 + self._set_read_buffer_limits() + + self._app_writing_paused = False + self._outgoing_high_water = 0 + self._outgoing_low_water = 0 + self._set_write_buffer_limits() + + cdef _set_app_protocol(self, app_protocol): + self._app_protocol = app_protocol + if (hasattr(app_protocol, 'get_buffer') and + not isinstance(app_protocol, aio_Protocol)): + self._app_protocol_get_buffer = app_protocol.get_buffer + self._app_protocol_buffer_updated = app_protocol.buffer_updated + self._app_protocol_is_buffer = True + else: + self._app_protocol_is_buffer = False + + cdef _wakeup_waiter(self, exc=None): + if self._waiter is None: + return + if not self._waiter.cancelled(): + if exc is not None: + self._waiter.set_exception(exc) + else: + self._waiter.set_result(None) + self._waiter = None + + def _get_app_transport(self, context=None): + if self._app_transport is None: + if self._app_transport_created: + raise RuntimeError('Creating _SSLProtocolTransport twice') + self._app_transport = _SSLProtocolTransport(self._loop, self, + context) 
+ self._app_transport_created = True + return self._app_transport + + def connection_made(self, transport): + """Called when the low-level connection is made. + + Start the SSL handshake. + """ + self._transport = transport + self._start_handshake() + + def connection_lost(self, exc): + """Called when the low-level connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + """ + self._write_backlog.clear() + self._outgoing_read() + self._conn_lost += 1 + + # Just mark the app transport as closed so that its __dealloc__ + # doesn't complain. + if self._app_transport is not None: + self._app_transport._closed = True + + if self._state != DO_HANDSHAKE: + if self._app_state == STATE_CON_MADE or \ + self._app_state == STATE_EOF: + self._app_state = STATE_CON_LOST + self._loop.call_soon(self._app_protocol.connection_lost, exc) + self._set_state(UNWRAPPED) + self._transport = None + self._app_transport = None + self._app_protocol = None + self._wakeup_waiter(exc) + + if self._shutdown_timeout_handle: + self._shutdown_timeout_handle.cancel() + self._shutdown_timeout_handle = None + if self._handshake_timeout_handle: + self._handshake_timeout_handle.cancel() + self._handshake_timeout_handle = None + + def get_buffer(self, n): + cdef size_t want = n + if want > SSL_READ_MAX_SIZE: + want = SSL_READ_MAX_SIZE + if self._ssl_buffer_len < want: + self._ssl_buffer = PyMem_RawRealloc(self._ssl_buffer, want) + if not self._ssl_buffer: + raise MemoryError() + self._ssl_buffer_len = want + self._ssl_buffer_view = PyMemoryView_FromMemory( + self._ssl_buffer, want, PyBUF_WRITE) + return self._ssl_buffer_view + + def buffer_updated(self, nbytes): + self._incoming_write(PyMemoryView_FromMemory( + self._ssl_buffer, nbytes, PyBUF_WRITE)) + + if self._state == DO_HANDSHAKE: + self._do_handshake() + + elif self._state == WRAPPED: + self._do_read() + + elif self._state == FLUSHING: + self._do_flush() + + elif self._state == SHUTDOWN: + self._do_shutdown() + + def eof_received(self): + """Called when the other end of the low-level stream + is half-closed. + + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol. + """ + try: + if self._loop.get_debug(): + aio_logger.debug("%r received EOF", self) + + if self._state == DO_HANDSHAKE: + self._on_handshake_complete(ConnectionResetError) + + elif self._state == WRAPPED or self._state == FLUSHING: + # We treat a low-level EOF as a critical situation similar to a + # broken connection - just send whatever is in the buffer and + # close. No application level eof_received() is called - + # because we don't want the user to think that this is a + # graceful shutdown triggered by SSL "close_notify". 
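+ #
+ # Illustrative sketch (not uvloop code): what an application
+ # protocol observes as a consequence of this policy:
+ #
+ #     class App(asyncio.Protocol):
+ #         def eof_received(self):
+ #             return False        # only fired on a clean SSL close_notify;
+ #                                 # the return value is ignored under ssl
+ #         def connection_lost(self, exc):
+ #             ...                 # a raw TCP EOF surfaces here instead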
+ self._set_state(SHUTDOWN) + self._on_shutdown_complete(None) + + elif self._state == SHUTDOWN: + self._on_shutdown_complete(None) + + except Exception: + self._transport.close() + raise + + cdef _get_extra_info(self, name, default=None): + if name == 'uvloop.sslproto': + return self + elif name in self._extra: + return self._extra[name] + elif self._transport is not None: + return self._transport.get_extra_info(name, default) + else: + return default + + cdef _set_state(self, SSLProtocolState new_state): + cdef bint allowed = False + + if new_state == UNWRAPPED: + allowed = True + + elif self._state == UNWRAPPED and new_state == DO_HANDSHAKE: + allowed = True + + elif self._state == DO_HANDSHAKE and new_state == WRAPPED: + allowed = True + + elif self._state == WRAPPED and new_state == FLUSHING: + allowed = True + + elif self._state == WRAPPED and new_state == SHUTDOWN: + allowed = True + + elif self._state == FLUSHING and new_state == SHUTDOWN: + allowed = True + + if allowed: + self._state = new_state + + else: + raise RuntimeError( + 'cannot switch state from {} to {}'.format( + self._state, new_state)) + + # Handshake flow + + cdef _start_handshake(self): + if self._loop.get_debug(): + aio_logger.debug("%r starts SSL handshake", self) + self._handshake_start_time = self._loop.time() + else: + self._handshake_start_time = None + + self._set_state(DO_HANDSHAKE) + + # start handshake timeout count down + self._handshake_timeout_handle = \ + self._loop.call_later(self._ssl_handshake_timeout, + lambda: self._check_handshake_timeout()) + + try: + self._sslobj = self._sslcontext.wrap_bio( + self._incoming, self._outgoing, + server_side=self._server_side, + server_hostname=self._server_hostname) + self._sslobj_read = self._sslobj.read + self._sslobj_write = self._sslobj.write + except Exception as ex: + self._on_handshake_complete(ex) + else: + self._do_handshake() + + cdef _check_handshake_timeout(self): + if self._state == DO_HANDSHAKE: + msg = ( + f"SSL handshake is taking longer than " + f"{self._ssl_handshake_timeout} seconds: " + f"aborting the connection" + ) + self._fatal_error(ConnectionAbortedError(msg)) + + cdef _do_handshake(self): + try: + self._sslobj.do_handshake() + except ssl_SSLAgainErrors as exc: + self._process_outgoing() + except ssl_SSLError as exc: + self._on_handshake_complete(exc) + else: + self._on_handshake_complete(None) + + cdef _on_handshake_complete(self, handshake_exc): + if self._handshake_timeout_handle is not None: + self._handshake_timeout_handle.cancel() + self._handshake_timeout_handle = None + + sslobj = self._sslobj + try: + if handshake_exc is None: + self._set_state(WRAPPED) + else: + raise handshake_exc + + peercert = sslobj.getpeercert() + except Exception as exc: + self._set_state(UNWRAPPED) + if isinstance(exc, ssl_CertificateError): + msg = 'SSL handshake failed on verifying the certificate' + else: + msg = 'SSL handshake failed' + self._fatal_error(exc, msg) + self._wakeup_waiter(exc) + return + + if self._loop.get_debug(): + dt = self._loop.time() - self._handshake_start_time + aio_logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3) + + # Add extra info that becomes available after handshake. 
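+ # Illustrative sketch (not uvloop code): application code reads these
+ # keys via the transport, e.g.:
+ #
+ #     cert = transport.get_extra_info('peercert')
+ #     cipher = transport.get_extra_info('cipher')
+ #     sslobj = transport.get_extra_info('ssl_object')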
+ self._extra.update(peercert=peercert, + cipher=sslobj.cipher(), + compression=sslobj.compression(), + ssl_object=sslobj) + if self._app_state == STATE_INIT: + self._app_state = STATE_CON_MADE + self._app_protocol.connection_made(self._get_app_transport()) + self._wakeup_waiter() + + # We should wakeup user code before sending the first data below. In + # case of `start_tls()`, the user can only get the SSLTransport in the + # wakeup callback, because `connection_made()` is not called again. + # We should schedule the first data later than the wakeup callback so + # that the user get a chance to e.g. check ALPN with the transport + # before having to handle the first data. + self._loop._call_soon_handle( + new_MethodHandle(self._loop, + "SSLProtocol._do_read", + self._do_read, + None, # current context is good + self)) + + # Shutdown flow + + cdef _start_shutdown(self, object context=None): + if self._state in (FLUSHING, SHUTDOWN, UNWRAPPED): + return + # we don't need the context for _abort or the timeout, because + # TCP transport._force_close() should be able to call + # connection_lost() in the right context + if self._app_transport is not None: + self._app_transport._closed = True + if self._state == DO_HANDSHAKE: + self._abort(None) + else: + self._set_state(FLUSHING) + self._shutdown_timeout_handle = \ + self._loop.call_later(self._ssl_shutdown_timeout, + lambda: self._check_shutdown_timeout()) + self._do_flush(context) + + cdef _check_shutdown_timeout(self): + if self._state in (FLUSHING, SHUTDOWN): + self._transport._force_close( + aio_TimeoutError('SSL shutdown timed out')) + + cdef _do_read_into_void(self, object context): + """Consume and discard incoming application data. + + If close_notify is received for the first time, call eof_received. + """ + cdef: + bint close_notify = False + try: + while True: + if not self._sslobj_read(SSL_READ_MAX_SIZE): + close_notify = True + break + except ssl_SSLAgainErrors as exc: + pass + except ssl_SSLZeroReturnError: + close_notify = True + if close_notify: + self._call_eof_received(context) + + cdef _do_flush(self, object context=None): + """Flush the write backlog, discarding new data received. + + We don't send close_notify in FLUSHING because we still want to send + the remaining data over SSL, even if we received a close_notify. Also, + no application-level resume_writing() or pause_writing() will be called + in FLUSHING, as we could fully manage the flow control internally. 
+ """ + try: + self._do_read_into_void(context) + self._do_write() + self._process_outgoing() + self._control_ssl_reading() + except Exception as ex: + self._on_shutdown_complete(ex) + else: + if not self._get_write_buffer_size(): + self._set_state(SHUTDOWN) + self._do_shutdown(context) + + cdef _do_shutdown(self, object context=None): + """Send close_notify and wait for the same from the peer.""" + try: + # we must skip all application data (if any) before unwrap + self._do_read_into_void(context) + try: + self._sslobj.unwrap() + except ssl_SSLAgainErrors as exc: + self._process_outgoing() + else: + self._process_outgoing() + if not self._get_write_buffer_size(): + self._on_shutdown_complete(None) + except Exception as ex: + self._on_shutdown_complete(ex) + + cdef _on_shutdown_complete(self, shutdown_exc): + if self._shutdown_timeout_handle is not None: + self._shutdown_timeout_handle.cancel() + self._shutdown_timeout_handle = None + + # we don't need the context here because TCP transport.close() should + # be able to call connection_made() in the right context + if shutdown_exc: + self._fatal_error(shutdown_exc, 'Error occurred during shutdown') + else: + self._transport.close() + + cdef _abort(self, exc): + self._set_state(UNWRAPPED) + if self._transport is not None: + self._transport._force_close(exc) + + # Outgoing flow + + cdef _write_appdata(self, list_of_data, object context): + if self._state in (FLUSHING, SHUTDOWN, UNWRAPPED): + if self._conn_lost >= LOG_THRESHOLD_FOR_CONNLOST_WRITES: + aio_logger.warning('SSL connection is closed') + self._conn_lost += 1 + return + + for data in list_of_data: + self._write_backlog.append(data) + self._write_buffer_size += len(data) + + try: + if self._state == WRAPPED: + self._do_write() + self._process_outgoing() + self._control_app_writing(context) + + except Exception as ex: + self._fatal_error(ex, 'Fatal error on SSL protocol') + + cdef _do_write(self): + """Do SSL write, consumes write backlog and fills outgoing BIO.""" + cdef size_t data_len, count + try: + while self._write_backlog: + data = self._write_backlog[0] + count = self._sslobj_write(data) + data_len = len(data) + if count < data_len: + if not PyMemoryView_Check(data): + data = PyMemoryView_FromObject(data) + self._write_backlog[0] = data[count:] + self._write_buffer_size -= count + else: + del self._write_backlog[0] + self._write_buffer_size -= data_len + except ssl_SSLAgainErrors as exc: + pass + + cdef _process_outgoing(self): + """Send bytes from the outgoing BIO.""" + if not self._ssl_writing_paused: + data = self._outgoing_read() + if len(data): + self._transport.write(data) + + # Incoming flow + + cdef _do_read(self): + if self._state != WRAPPED: + return + try: + if not self._app_reading_paused: + if self._app_protocol_is_buffer: + self._do_read__buffered() + else: + self._do_read__copied() + if self._write_backlog: + self._do_write() + self._process_outgoing() + self._control_app_writing() + self._control_ssl_reading() + except Exception as ex: + self._fatal_error(ex, 'Fatal error on SSL protocol') + + cdef _do_read__buffered(self): + cdef: + Py_buffer pybuf + bint pybuf_inited = False + size_t wants, offset = 0 + int count = 1 + object buf + + buf = self._app_protocol_get_buffer(self._get_read_buffer_size()) + wants = len(buf) + + try: + count = self._sslobj_read(wants, buf) + + if count > 0: + offset = count + if offset < wants: + PyObject_GetBuffer(buf, &pybuf, PyBUF_WRITABLE) + pybuf_inited = True + while offset < wants: + buf = PyMemoryView_FromMemory( + 
(pybuf.buf) + offset, + wants - offset, + PyBUF_WRITE) + count = self._sslobj_read(wants - offset, buf) + if count > 0: + offset += count + else: + break + else: + self._loop._call_soon_handle( + new_MethodHandle(self._loop, + "SSLProtocol._do_read", + self._do_read, + None, # current context is good + self)) + except ssl_SSLAgainErrors as exc: + pass + finally: + if pybuf_inited: + PyBuffer_Release(&pybuf) + if offset > 0: + self._app_protocol_buffer_updated(offset) + if not count: + # close_notify + self._call_eof_received() + self._start_shutdown() + + cdef _do_read__copied(self): + cdef: + list data + bytes first, chunk = b'1' + bint zero = True, one = False + + try: + while True: + chunk = self._sslobj_read(SSL_READ_MAX_SIZE) + if not chunk: + break + if zero: + zero = False + one = True + first = chunk + elif one: + one = False + data = [first, chunk] + else: + data.append(chunk) + except ssl_SSLAgainErrors as exc: + pass + if one: + self._app_protocol.data_received(first) + elif not zero: + self._app_protocol.data_received(b''.join(data)) + if not chunk: + # close_notify + self._call_eof_received() + self._start_shutdown() + + cdef _call_eof_received(self, object context=None): + if self._app_state == STATE_CON_MADE: + self._app_state = STATE_EOF + try: + if context is None: + # If the caller didn't provide a context, we assume the + # caller is already in the right context, which is usually + # inside the upstream callbacks like buffer_updated() + keep_open = self._app_protocol.eof_received() + else: + keep_open = run_in_context( + context, self._app_protocol.eof_received, + ) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as ex: + self._fatal_error(ex, 'Error calling eof_received()') + else: + if keep_open: + aio_logger.warning('returning true from eof_received() ' + 'has no effect when using ssl') + + # Flow control for writes from APP socket + + cdef _control_app_writing(self, object context=None): + cdef size_t size = self._get_write_buffer_size() + if size >= self._outgoing_high_water and not self._app_writing_paused: + self._app_writing_paused = True + try: + if context is None: + # If the caller didn't provide a context, we assume the + # caller is already in the right context, which is usually + # inside the upstream callbacks like buffer_updated() + self._app_protocol.pause_writing() + else: + run_in_context(context, self._app_protocol.pause_writing) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.pause_writing() failed', + 'exception': exc, + 'transport': self._app_transport, + 'protocol': self, + }) + elif size <= self._outgoing_low_water and self._app_writing_paused: + self._app_writing_paused = False + try: + if context is None: + # If the caller didn't provide a context, we assume the + # caller is already in the right context, which is usually + # inside the upstream callbacks like resume_writing() + self._app_protocol.resume_writing() + else: + run_in_context(context, self._app_protocol.resume_writing) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.resume_writing() failed', + 'exception': exc, + 'transport': self._app_transport, + 'protocol': self, + }) + + cdef size_t _get_write_buffer_size(self): + return self._outgoing.pending + self._write_buffer_size + + cdef _set_write_buffer_limits(self, high=None, low=None): + high, low = add_flowcontrol_defaults( 
+ high, low, FLOW_CONTROL_HIGH_WATER_SSL_WRITE) + self._outgoing_high_water = high + self._outgoing_low_water = low + + # Flow control for reads to APP socket + + cdef _pause_reading(self): + self._app_reading_paused = True + + cdef _resume_reading(self, object context): + if self._app_reading_paused: + self._app_reading_paused = False + if self._state == WRAPPED: + self._loop._call_soon_handle( + new_MethodHandle(self._loop, + "SSLProtocol._do_read", + self._do_read, + context, + self)) + + # Flow control for reads from SSL socket + + cdef _control_ssl_reading(self): + cdef size_t size = self._get_read_buffer_size() + if size >= self._incoming_high_water and not self._ssl_reading_paused: + self._ssl_reading_paused = True + self._transport.pause_reading() + elif size <= self._incoming_low_water and self._ssl_reading_paused: + self._ssl_reading_paused = False + self._transport.resume_reading() + + cdef _set_read_buffer_limits(self, high=None, low=None): + high, low = add_flowcontrol_defaults( + high, low, FLOW_CONTROL_HIGH_WATER_SSL_READ) + self._incoming_high_water = high + self._incoming_low_water = low + + cdef size_t _get_read_buffer_size(self): + return self._incoming.pending + + # Flow control for writes to SSL socket + + def pause_writing(self): + """Called when the low-level transport's buffer goes over + the high-water mark. + """ + assert not self._ssl_writing_paused + self._ssl_writing_paused = True + + def resume_writing(self): + """Called when the low-level transport's buffer drains below + the low-water mark. + """ + assert self._ssl_writing_paused + self._ssl_writing_paused = False + + if self._state == WRAPPED: + self._process_outgoing() + self._control_app_writing() + + elif self._state == FLUSHING: + self._do_flush() + + elif self._state == SHUTDOWN: + self._do_shutdown() + + cdef _fatal_error(self, exc, message='Fatal error on transport'): + if self._app_transport: + self._app_transport._force_close(exc) + elif self._transport: + self._transport._force_close(exc) + + if isinstance(exc, OSError): + if self._loop.get_debug(): + aio_logger.debug("%r: %s", self, message, exc_info=True) + elif not isinstance(exc, aio_CancelledError): + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self._transport, + 'protocol': self, + }) diff --git a/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/INSTALLER b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/METADATA b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/METADATA new file mode 100644 index 0000000..c8cfde6 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/METADATA @@ -0,0 +1,152 @@ +Metadata-Version: 2.3 +Name: watchfiles +Version: 0.24.0 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming 
Language :: Python :: 3.13 +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS +Classifier: Environment :: MacOS X +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Filesystems +Classifier: Framework :: AnyIO +Requires-Dist: anyio >=3.0.0 +License-File: LICENSE +Summary: Simple, modern and high performance file watching and code reload in python. +Home-Page: https://github.com/samuelcolvin/watchfiles +Author-email: Samuel Colvin +License: MIT +Requires-Python: >=3.8 +Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM +Project-URL: Homepage, https://github.com/samuelcolvin/watchfiles +Project-URL: Documentation, https://watchfiles.helpmanual.io +Project-URL: Funding, https://github.com/sponsors/samuelcolvin +Project-URL: Source, https://github.com/samuelcolvin/watchfiles +Project-URL: Changelog, https://github.com/samuelcolvin/watchfiles/releases + +# watchfiles + +[![CI](https://github.com/samuelcolvin/watchfiles/workflows/ci/badge.svg?event=push)](https://github.com/samuelcolvin/watchfiles/actions?query=event%3Apush+branch%3Amain+workflow%3Aci) +[![Coverage](https://codecov.io/gh/samuelcolvin/watchfiles/branch/main/graph/badge.svg)](https://codecov.io/gh/samuelcolvin/watchfiles) +[![pypi](https://img.shields.io/pypi/v/watchfiles.svg)](https://pypi.python.org/pypi/watchfiles) +[![CondaForge](https://img.shields.io/conda/v/conda-forge/watchfiles.svg)](https://anaconda.org/conda-forge/watchfiles) +[![license](https://img.shields.io/github/license/samuelcolvin/watchfiles.svg)](https://github.com/samuelcolvin/watchfiles/blob/main/LICENSE) + +Simple, modern and high performance file watching and code reload in python. + +--- + +**Documentation**: [watchfiles.helpmanual.io](https://watchfiles.helpmanual.io) + +**Source Code**: [github.com/samuelcolvin/watchfiles](https://github.com/samuelcolvin/watchfiles) + +--- + +Underlying file system notifications are handled by the [Notify](https://github.com/notify-rs/notify) rust library. + +This package was previously named "watchgod", +see [the migration guide](https://watchfiles.helpmanual.io/migrating/) for more information. + +## Installation + +**watchfiles** requires Python 3.8 - 3.13. + +```bash +pip install watchfiles +``` + +Binaries are available for: + +* **Linux**: `x86_64`, `aarch64`, `i686`, `armv7l`, `musl-x86_64` & `musl-aarch64` +* **MacOS**: `x86_64` & `arm64` +* **Windows**: `amd64` & `win32` + +Otherwise, you can install from source which requires Rust stable to be installed. + +## Usage + +Here are some examples of what **watchfiles** can do: + +### `watch` Usage + +```py +from watchfiles import watch + +for changes in watch('./path/to/dir'): + print(changes) +``` +See [`watch` docs](https://watchfiles.helpmanual.io/api/watch/#watchfiles.watch) for more details. + +### `awatch` Usage + +```py +import asyncio +from watchfiles import awatch + +async def main(): + async for changes in awatch('/path/to/dir'): + print(changes) + +asyncio.run(main()) +``` +See [`awatch` docs](https://watchfiles.helpmanual.io/api/watch/#watchfiles.awatch) for more details. + +### `run_process` Usage + +```py +from watchfiles import run_process + +def foobar(a, b, c): + ... 
+ +if __name__ == '__main__': + run_process('./path/to/dir', target=foobar, args=(1, 2, 3)) +``` +See [`run_process` docs](https://watchfiles.helpmanual.io/api/run_process/#watchfiles.run_process) for more details. + +### `arun_process` Usage + +```py +import asyncio +from watchfiles import arun_process + +def foobar(a, b, c): + ... + +async def main(): + await arun_process('./path/to/dir', target=foobar, args=(1, 2, 3)) + +if __name__ == '__main__': + asyncio.run(main()) +``` +See [`arun_process` docs](https://watchfiles.helpmanual.io/api/run_process/#watchfiles.arun_process) for more details. + +## CLI + +**watchfiles** also comes with a CLI for running and reloading code. To run `some command` when files in `src` change: + +``` +watchfiles "some command" src +``` + +For more information, see [the CLI docs](https://watchfiles.helpmanual.io/cli/). + +Or run + +```bash +watchfiles --help +``` + diff --git a/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/RECORD b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/RECORD new file mode 100644 index 0000000..ab56505 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/RECORD @@ -0,0 +1,24 @@ +../../../bin/watchfiles,sha256=z-Om75o0IJKjme3ewjzS1qRTxXRyutPn59NzlSei79w,238 +watchfiles-0.24.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +watchfiles-0.24.0.dist-info/METADATA,sha256=guQwHLWXgFXyJiJjw9v-wGM4etUFg3IZ8vxSckvJplQ,4923 +watchfiles-0.24.0.dist-info/RECORD,, +watchfiles-0.24.0.dist-info/WHEEL,sha256=Kb7KizsPlTG3o6hyGw8mrqJSDjTczKr2vylB9aPcUXc,104 +watchfiles-0.24.0.dist-info/entry_points.txt,sha256=s1Dpa2d_KKBy-jKREWW60Z3GoRZ3JpCEo_9iYDt6hOQ,48 +watchfiles-0.24.0.dist-info/licenses/LICENSE,sha256=Nrb5inpC3jnhTxxutZgxzblMwRsF7q0xyB-4-FHRdQs,1110 +watchfiles/__init__.py,sha256=IRlM9KOSedMzF1fvLr7yEHPVS-UFERNThlB-tmWI8yU,364 +watchfiles/__main__.py,sha256=JgErYkiskih8Y6oRwowALtR-rwQhAAdqOYWjQraRIPI,59 +watchfiles/__pycache__/__init__.cpython-312.pyc,, +watchfiles/__pycache__/__main__.cpython-312.pyc,, +watchfiles/__pycache__/cli.cpython-312.pyc,, +watchfiles/__pycache__/filters.cpython-312.pyc,, +watchfiles/__pycache__/main.cpython-312.pyc,, +watchfiles/__pycache__/run.cpython-312.pyc,, +watchfiles/__pycache__/version.cpython-312.pyc,, +watchfiles/_rust_notify.cpython-312-darwin.so,sha256=f31PxoSY2MTkSOz-MiaTsuRZte6zbVaL1VF1RLj4KhY,880776 +watchfiles/_rust_notify.pyi,sha256=q5FQkXgBJEFPt9RCf7my4wP5RM1FwSVpqf221csyebg,4753 +watchfiles/cli.py,sha256=DHMI0LfT7hOrWai_Y4RP_vvTvVdtcDaioixXLiv2pG4,7707 +watchfiles/filters.py,sha256=U0zXGOeg9dMHkT51-56BKpRrWIu95lPq0HDR_ZB4oDE,5139 +watchfiles/main.py,sha256=Lai6LxxW0kY8JpbrTbQMb47_xpdNve-z0KjQ3hQ3rLs,14360 +watchfiles/py.typed,sha256=MS4Na3to9VTGPy_8wBQM_6mNKaX4qIpi5-w7_LZB-8I,69 +watchfiles/run.py,sha256=llxWtt2GHy-0OjR0ZDW5ksNdB2Wl3XIfKu5DcMgdxYM,15350 +watchfiles/version.py,sha256=NRWUnkZ32DamsNKV20EetagIGTLDMMUnqDWVGFFA2WQ,85 diff --git a/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/WHEEL b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/WHEEL new file mode 100644 index 0000000..7f300df --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: maturin (1.7.1) +Root-Is-Purelib: false +Tag: cp312-cp312-macosx_11_0_arm64 diff --git a/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/entry_points.txt 
b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/entry_points.txt new file mode 100644 index 0000000..5164296 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +watchfiles=watchfiles.cli:cli diff --git a/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/licenses/LICENSE b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..08c9a8d --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles-0.24.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017, 2018, 2019, 2020, 2021, 2022 Samuel Colvin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/__init__.py b/hackaton/lib/python3.12/site-packages/watchfiles/__init__.py new file mode 100644 index 0000000..877fbd5 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles/__init__.py @@ -0,0 +1,17 @@ +from .filters import BaseFilter, DefaultFilter, PythonFilter +from .main import Change, awatch, watch +from .run import arun_process, run_process +from .version import VERSION + +__version__ = VERSION +__all__ = ( + 'watch', + 'awatch', + 'run_process', + 'arun_process', + 'Change', + 'BaseFilter', + 'DefaultFilter', + 'PythonFilter', + 'VERSION', +) diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/__main__.py b/hackaton/lib/python3.12/site-packages/watchfiles/__main__.py new file mode 100644 index 0000000..d396c2a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles/__main__.py @@ -0,0 +1,4 @@ +from .cli import cli + +if __name__ == '__main__': + cli() diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..92e80c0 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/__main__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 0000000..562be7e Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/__main__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/cli.cpython-312.pyc 
b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/cli.cpython-312.pyc new file mode 100644 index 0000000..f9dfec6 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/cli.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/filters.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/filters.cpython-312.pyc new file mode 100644 index 0000000..a97f8eb Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/filters.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/main.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/main.cpython-312.pyc new file mode 100644 index 0000000..11e8303 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/main.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/run.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/run.cpython-312.pyc new file mode 100644 index 0000000..5e50d92 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/run.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/version.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/version.cpython-312.pyc new file mode 100644 index 0000000..14142c3 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/watchfiles/__pycache__/version.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/_rust_notify.cpython-312-darwin.so b/hackaton/lib/python3.12/site-packages/watchfiles/_rust_notify.cpython-312-darwin.so new file mode 100755 index 0000000..945598a Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/watchfiles/_rust_notify.cpython-312-darwin.so differ diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/_rust_notify.pyi b/hackaton/lib/python3.12/site-packages/watchfiles/_rust_notify.pyi new file mode 100644 index 0000000..e08cfff --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles/_rust_notify.pyi @@ -0,0 +1,111 @@ +from typing import Any, Literal, Protocol + +__all__ = 'RustNotify', 'WatchfilesRustInternalError' + +__version__: str +"""The package version as defined in `Cargo.toml`, modified to match python's versioning semantics.""" + +class AbstractEvent(Protocol): + def is_set(self) -> bool: ... + +class RustNotify: + """ + Interface to the Rust [notify](https://crates.io/crates/notify) crate which does + the heavy lifting of watching for file changes and grouping them into events. + """ + + def __init__( + self, + watch_paths: list[str], + debug: bool, + force_polling: bool, + poll_delay_ms: int, + recursive: bool, + ignore_permission_denied: bool, + ) -> None: + """ + Create a new `RustNotify` instance and start a thread to watch for changes. + + `FileNotFoundError` is raised if any of the paths do not exist. 
+ + Args: + watch_paths: file system paths to watch for changes, can be directories or files + debug: if true, print details about all events to stderr + force_polling: if true, always use polling instead of file system notifications + poll_delay_ms: delay between polling for changes, only used if `force_polling=True` + recursive: if `True`, watch for changes in sub-directories recursively, otherwise watch only for changes in + the top-level directory, default is `True`. + ignore_permission_denied: if `True`, permission denied errors are ignored while watching changes. + """ + def watch( + self, + debounce_ms: int, + step_ms: int, + timeout_ms: int, + stop_event: AbstractEvent | None, + ) -> set[tuple[int, str]] | Literal['signal', 'stop', 'timeout']: + """ + Watch for changes. + + This method will wait `timeout_ms` milliseconds for changes, but once a change is detected, + it will group changes and return in no more than `debounce_ms` milliseconds. + + The GIL is released during a `step_ms` sleep on each iteration to avoid + blocking python. + + Args: + debounce_ms: maximum time in milliseconds to group changes over before returning. + step_ms: time to wait for new changes in milliseconds, if no changes are detected + in this time, and at least one change has been detected, the changes are yielded. + timeout_ms: maximum time in milliseconds to wait for changes before returning, + `0` means wait indefinitely, `debounce_ms` takes precedence over `timeout_ms` once + a change is detected. + stop_event: event to check on every iteration to see if this function should return early. + The event should be an object which has an `is_set()` method which returns a boolean. + + Returns: + See below. + + Return values have the following meanings: + + * Change details as a `set` of `(event_type, path)` tuples, the event types are ints which match + [`Change`][watchfiles.Change], `path` is a string representing the path of the file that changed + * `'signal'` string, if a signal was received + * `'stop'` string, if the `stop_event` was set + * `'timeout'` string, if `timeout_ms` was exceeded + """ + def __enter__(self) -> RustNotify: + """ + Does nothing, but allows `RustNotify` to be used as a context manager. + + !!! note + + The watching thread is created when an instance is initiated, not on `__enter__`. + """ + def __exit__(self, *args: Any) -> None: + """ + Calls [`close`][watchfiles._rust_notify.RustNotify.close]. + """ + def close(self) -> None: + """ + Stops the watching thread. After `close` is called, the `RustNotify` instance can no + longer be used, calls to [`watch`][watchfiles._rust_notify.RustNotify.watch] will raise a `RuntimeError`. + + !!! note + + `close` is not required, just deleting the `RustNotify` instance will kill the thread + implicitly. + + As per [#163](https://github.com/samuelcolvin/watchfiles/issues/163) `close()` is only required because + in the event of an error, the traceback in `sys.exc_info` keeps a reference to `watchfiles.watch`'s + frame, so you can't rely on the `RustNotify` object being deleted, and thereby stopping + the watching thread. + """ + +class WatchfilesRustInternalError(RuntimeError): + """ + Raised when RustNotify encounters an unknown error. + + If you get this a lot, please check [github](https://github.com/samuelcolvin/watchfiles/issues) issues + and create a new issue if your problem is not discussed.
+ """ diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/cli.py b/hackaton/lib/python3.12/site-packages/watchfiles/cli.py new file mode 100644 index 0000000..f1e1ddd --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles/cli.py @@ -0,0 +1,224 @@ +import argparse +import logging +import os +import shlex +import sys +from pathlib import Path +from textwrap import dedent +from typing import Any, Callable, List, Optional, Tuple, Union, cast + +from . import Change +from .filters import BaseFilter, DefaultFilter, PythonFilter +from .run import detect_target_type, import_string, run_process +from .version import VERSION + +logger = logging.getLogger('watchfiles.cli') + + +def resolve_path(path_str: str) -> Path: + path = Path(path_str) + if not path.exists(): + raise FileNotFoundError(path) + else: + return path.resolve() + + +def cli(*args_: str) -> None: + """ + Watch one or more directories and execute either a shell command or a python function on file changes. + + Example of watching the current directory and calling a python function: + + watchfiles foobar.main + + Example of watching python files in two local directories and calling a shell command: + + watchfiles --filter python 'pytest --lf' src tests + + See https://watchfiles.helpmanual.io/cli/ for more information. + """ + args = args_ or sys.argv[1:] + parser = argparse.ArgumentParser( + prog='watchfiles', + description=dedent((cli.__doc__ or '').strip('\n')), + formatter_class=argparse.RawTextHelpFormatter, + ) + parser.add_argument('target', help='Command or dotted function path to run') + parser.add_argument( + 'paths', nargs='*', default='.', help='Filesystem paths to watch, defaults to current directory' + ) + + parser.add_argument( + '--ignore-paths', + nargs='?', + type=str, + help=( + 'Specify directories to ignore, ' + 'to ignore multiple paths use a comma as separator, e.g. 
"env" or "env,node_modules"' + ), + ) + parser.add_argument( + '--target-type', + nargs='?', + type=str, + default='auto', + choices=['command', 'function', 'auto'], + help=( + 'Whether the target should be intercepted as a shell command or a python function, ' + 'defaults to "auto" which infers the target type from the target string' + ), + ) + parser.add_argument( + '--filter', + nargs='?', + type=str, + default='default', + help=( + 'Which files to watch, defaults to "default" which uses the "DefaultFilter", ' + '"python" uses the "PythonFilter", "all" uses no filter, ' + 'any other value is interpreted as a python function/class path which is imported' + ), + ) + parser.add_argument( + '--args', + nargs='?', + type=str, + help='Arguments to set on sys.argv before calling target function, used only if the target is a function', + ) + parser.add_argument('--verbose', action='store_true', help='Set log level to "debug", wins over `--verbosity`') + parser.add_argument( + '--non-recursive', action='store_true', help='Do not watch for changes in sub-directories recursively' + ) + parser.add_argument( + '--verbosity', + nargs='?', + type=str, + default='info', + choices=['warning', 'info', 'debug'], + help='Log level, defaults to "info"', + ) + parser.add_argument( + '--sigint-timeout', + nargs='?', + type=int, + default=5, + help='How long to wait for the sigint timeout before sending sigkill.', + ) + parser.add_argument( + '--grace-period', + nargs='?', + type=float, + default=0, + help='Number of seconds after the process is started before watching for changes.', + ) + parser.add_argument( + '--sigkill-timeout', + nargs='?', + type=int, + default=1, + help='How long to wait for the sigkill timeout before issuing a timeout exception.', + ) + parser.add_argument( + '--ignore-permission-denied', + action='store_true', + help='Ignore permission denied errors while watching files and directories.', + ) + parser.add_argument('--version', '-V', action='version', version=f'%(prog)s v{VERSION}') + arg_namespace = parser.parse_args(args) + + if arg_namespace.verbose: + log_level = logging.DEBUG + else: + log_level = getattr(logging, arg_namespace.verbosity.upper()) + + hdlr = logging.StreamHandler() + hdlr.setLevel(log_level) + hdlr.setFormatter(logging.Formatter(fmt='[%(asctime)s] %(message)s', datefmt='%H:%M:%S')) + wg_logger = logging.getLogger('watchfiles') + wg_logger.addHandler(hdlr) + wg_logger.setLevel(log_level) + + if arg_namespace.target_type == 'auto': + target_type = detect_target_type(arg_namespace.target) + else: + target_type = arg_namespace.target_type + + if target_type == 'function': + logger.debug('target_type=function, attempting import of "%s"', arg_namespace.target) + import_exit(arg_namespace.target) + if arg_namespace.args: + sys.argv = [arg_namespace.target] + shlex.split(arg_namespace.args) + elif arg_namespace.args: + logger.warning('--args is only used when the target is a function') + + try: + paths = [resolve_path(p) for p in arg_namespace.paths] + except FileNotFoundError as e: + print(f'path "{e}" does not exist', file=sys.stderr) + sys.exit(1) + + watch_filter, watch_filter_str = build_filter(arg_namespace.filter, arg_namespace.ignore_paths) + + logger.info( + 'watchfiles v%s 👀 path=%s target="%s" (%s) filter=%s...', + VERSION, + ', '.join(f'"{p}"' for p in paths), + arg_namespace.target, + target_type, + watch_filter_str, + ) + + run_process( + *paths, + target=arg_namespace.target, + target_type=target_type, + watch_filter=watch_filter, + debug=log_level == 
logging.DEBUG, + sigint_timeout=arg_namespace.sigint_timeout, + sigkill_timeout=arg_namespace.sigkill_timeout, + recursive=not arg_namespace.non_recursive, + ignore_permission_denied=arg_namespace.ignore_permission_denied, + grace_period=arg_namespace.grace_period, + ) + + +def import_exit(function_path: str) -> Any: + cwd = os.getcwd() + if cwd not in sys.path: + sys.path.append(cwd) + + try: + return import_string(function_path) + except ImportError as e: + print(f'ImportError: {e}', file=sys.stderr) + sys.exit(1) + + +def build_filter( + filter_name: str, ignore_paths_str: Optional[str] +) -> Tuple[Union[None, DefaultFilter, Callable[[Change, str], bool]], str]: + ignore_paths: List[Path] = [] + if ignore_paths_str: + ignore_paths = [Path(p).resolve() for p in ignore_paths_str.split(',')] + + if filter_name == 'default': + return DefaultFilter(ignore_paths=ignore_paths), 'DefaultFilter' + elif filter_name == 'python': + return PythonFilter(ignore_paths=ignore_paths), 'PythonFilter' + elif filter_name == 'all': + if ignore_paths: + logger.warning('"--ignore-paths" argument ignored as "all" filter was selected') + return None, '(no filter)' + + watch_filter_cls = import_exit(filter_name) + if isinstance(watch_filter_cls, type) and issubclass(watch_filter_cls, DefaultFilter): + return watch_filter_cls(ignore_paths=ignore_paths), watch_filter_cls.__name__ + + if ignore_paths: + logger.warning('"--ignore-paths" argument ignored as filter is not a subclass of DefaultFilter') + + if isinstance(watch_filter_cls, type) and issubclass(watch_filter_cls, BaseFilter): + return watch_filter_cls(), watch_filter_cls.__name__ + else: + watch_filter = cast(Callable[[Change, str], bool], watch_filter_cls) + return watch_filter, repr(watch_filter_cls) diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/filters.py b/hackaton/lib/python3.12/site-packages/watchfiles/filters.py new file mode 100644 index 0000000..d97dfe8 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles/filters.py @@ -0,0 +1,149 @@ +import logging +import os +import re +from pathlib import Path +from typing import TYPE_CHECKING, Optional, Sequence, Union + +__all__ = 'BaseFilter', 'DefaultFilter', 'PythonFilter' +logger = logging.getLogger('watchfiles.watcher') + + +if TYPE_CHECKING: + from .main import Change + + +class BaseFilter: + """ + Useful base class for creating filters. `BaseFilter` should be inherited and configured, rather than used + directly. + + The class supports ignoring files in 3 ways: + """ + + __slots__ = '_ignore_dirs', '_ignore_entity_regexes', '_ignore_paths' + ignore_dirs: Sequence[str] = () + """Full names of directories to ignore, an obvious example would be `.git`.""" + ignore_entity_patterns: Sequence[str] = () + """ + Patterns of files or directories to ignore, these are compiled into regexes. + + "entity" here refers to the specific file or directory - basically the result of `path.split(os.sep)[-1]`, + an obvious example would be `r'\\.py[cod]$'`. + """ + ignore_paths: Sequence[Union[str, Path]] = () + """ + Full paths to ignore, e.g. `/home/users/.cache` or `C:\\Users\\user\\.cache`. + """ + + def __init__(self) -> None: + self._ignore_dirs = set(self.ignore_dirs) + self._ignore_entity_regexes = tuple(re.compile(r) for r in self.ignore_entity_patterns) + self._ignore_paths = tuple(map(str, self.ignore_paths)) + + def __call__(self, change: 'Change', path: str) -> bool: + """ + Instances of `BaseFilter` subclasses can be used as callables. 
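+
+        For example (a sketch; the paths shown are hypothetical):
+
+        ```py
+        from watchfiles import Change, DefaultFilter
+
+        f = DefaultFilter()
+        assert f(Change.modified, '/project/app.py')
+        assert not f(Change.modified, '/project/.git/HEAD')
+        ```
+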
+        Args:
+            change: The type of change that occurred, see [`Change`][watchfiles.Change].
+            path: the raw path of the file or directory that changed.
+
+        Returns:
+            True if the file should be included in changes, False if it should be ignored.
+        """
+        parts = path.lstrip(os.sep).split(os.sep)
+        if any(p in self._ignore_dirs for p in parts):
+            return False
+
+        entity_name = parts[-1]
+        if any(r.search(entity_name) for r in self._ignore_entity_regexes):
+            return False
+        elif self._ignore_paths and path.startswith(self._ignore_paths):
+            return False
+        else:
+            return True
+
+    def __repr__(self) -> str:
+        args = ', '.join(f'{k}={getattr(self, k, None)!r}' for k in self.__slots__)
+        return f'{self.__class__.__name__}({args})'
+
+
+class DefaultFilter(BaseFilter):
+    """
+    The default filter, which ignores files and directories that you might commonly want to ignore.
+    """
+
+    ignore_dirs: Sequence[str] = (
+        '__pycache__',
+        '.git',
+        '.hg',
+        '.svn',
+        '.tox',
+        '.venv',
+        '.idea',
+        'node_modules',
+        '.mypy_cache',
+        '.pytest_cache',
+        '.hypothesis',
+    )
+    """Directory names to ignore."""
+
+    ignore_entity_patterns: Sequence[str] = (
+        r'\.py[cod]$',
+        r'\.___jb_...___$',
+        r'\.sw.$',
+        '~$',
+        r'^\.\#',
+        r'^\.DS_Store$',
+        r'^flycheck_',
+    )
+    """File/Directory name patterns to ignore."""
+
+    def __init__(
+        self,
+        *,
+        ignore_dirs: Optional[Sequence[str]] = None,
+        ignore_entity_patterns: Optional[Sequence[str]] = None,
+        ignore_paths: Optional[Sequence[Union[str, Path]]] = None,
+    ) -> None:
+        """
+        Args:
+            ignore_dirs: if not `None`, overrides the `ignore_dirs` value set on the class.
+            ignore_entity_patterns: if not `None`, overrides the `ignore_entity_patterns` value set on the class.
+            ignore_paths: if not `None`, overrides the `ignore_paths` value set on the class.
+        """
+        if ignore_dirs is not None:
+            self.ignore_dirs = ignore_dirs
+        if ignore_entity_patterns is not None:
+            self.ignore_entity_patterns = ignore_entity_patterns
+        if ignore_paths is not None:
+            self.ignore_paths = ignore_paths
+
+        super().__init__()
+
+
+class PythonFilter(DefaultFilter):
+    """
+    A filter for Python files. Since this class inherits from [`DefaultFilter`][watchfiles.DefaultFilter],
+    it will ignore files and directories that you might commonly want to ignore, as well as filtering out
+    all changes except those to Python files (files with extensions `('.py', '.pyx', '.pyd')`).
+    """
+
+    def __init__(
+        self,
+        *,
+        ignore_paths: Optional[Sequence[Union[str, Path]]] = None,
+        extra_extensions: Sequence[str] = (),
+    ) -> None:
+        """
+        Args:
+            ignore_paths: The paths to ignore, see [`BaseFilter`][watchfiles.BaseFilter].
+            extra_extensions: extra file extensions to watch in addition to the defaults (not extensions to ignore).
+
+        `ignore_paths` and `extra_extensions` are exposed as arguments partly to support [CLI](../cli.md) usage,
+        where they map to `--ignore-paths` and `--extensions`.
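+
+        For example (a sketch; the path and extra extension are hypothetical):
+
+        ```py
+        from watchfiles import PythonFilter, watch
+
+        # also watch .pyi stubs alongside the default ('.py', '.pyx', '.pyd')
+        for changes in watch('src', watch_filter=PythonFilter(extra_extensions=('.pyi',))):
+            print(changes)
+        ```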
+ """ + self.extensions = ('.py', '.pyx', '.pyd') + tuple(extra_extensions) + super().__init__(ignore_paths=ignore_paths) + + def __call__(self, change: 'Change', path: str) -> bool: + return path.endswith(self.extensions) and super().__call__(change, path) diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/main.py b/hackaton/lib/python3.12/site-packages/watchfiles/main.py new file mode 100644 index 0000000..59e507b --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles/main.py @@ -0,0 +1,353 @@ +import logging +import os +import sys +import warnings +from enum import IntEnum +from pathlib import Path +from typing import TYPE_CHECKING, AsyncGenerator, Callable, Generator, Optional, Set, Tuple, Union + +import anyio + +from ._rust_notify import RustNotify +from .filters import DefaultFilter + +__all__ = 'watch', 'awatch', 'Change', 'FileChange' +logger = logging.getLogger('watchfiles.main') + + +class Change(IntEnum): + """ + Enum representing the type of change that occurred. + """ + + added = 1 + """A new file or directory was added.""" + modified = 2 + """A file or directory was modified, can be either a metadata or data change.""" + deleted = 3 + """A file or directory was deleted.""" + + def raw_str(self) -> str: + return self.name + + +FileChange = Tuple[Change, str] +""" +A tuple representing a file change, first element is a [`Change`][watchfiles.Change] member, second is the path +of the file or directory that changed. +""" + +if TYPE_CHECKING: + import asyncio + from typing import Protocol + + import trio + + AnyEvent = Union[anyio.Event, asyncio.Event, trio.Event] + + class AbstractEvent(Protocol): + def is_set(self) -> bool: ... + + +def watch( + *paths: Union[Path, str], + watch_filter: Optional[Callable[['Change', str], bool]] = DefaultFilter(), + debounce: int = 1_600, + step: int = 50, + stop_event: Optional['AbstractEvent'] = None, + rust_timeout: int = 5_000, + yield_on_timeout: bool = False, + debug: Optional[bool] = None, + raise_interrupt: bool = True, + force_polling: Optional[bool] = None, + poll_delay_ms: int = 300, + recursive: bool = True, + ignore_permission_denied: Optional[bool] = None, +) -> Generator[Set[FileChange], None, None]: + """ + Watch one or more paths and yield a set of changes whenever files change. + + The paths watched can be directories or files, directories are watched recursively - changes in subdirectories + are also detected. + + #### Force polling + + Notify will fall back to file polling if it can't use file system notifications, but we also force Notify + to use polling if the `force_polling` argument is `True`; if `force_polling` is unset (or `None`), we enable + force polling thus: + + * if the `WATCHFILES_FORCE_POLLING` environment variable exists and is not empty: + * if the value is `false`, `disable` or `disabled`, force polling is disabled + * otherwise, force polling is enabled + * otherwise, we enable force polling only if we detect we're running on WSL (Windows Subsystem for Linux) + + Args: + *paths: filesystem paths to watch. + watch_filter: callable used to filter out changes which are not important, you can either use a raw callable + or a [`BaseFilter`][watchfiles.BaseFilter] instance, + defaults to an instance of [`DefaultFilter`][watchfiles.DefaultFilter]. To keep all changes, use `None`. + debounce: maximum time in milliseconds to group changes over before yielding them. 
+ step: time to wait for new changes in milliseconds, if no changes are detected in this time, and + at least one change has been detected, the changes are yielded. + stop_event: event to stop watching, if this is set, the generator will stop iteration, + this can be anything with an `is_set()` method which returns a bool, e.g. `threading.Event()`. + rust_timeout: maximum time in milliseconds to wait in the rust code for changes, `0` means no timeout. + yield_on_timeout: if `True`, the generator will yield upon timeout in rust even if no changes are detected. + debug: whether to print information about all filesystem changes in rust to stdout, if `None` will use the + `WATCHFILES_DEBUG` environment variable. + raise_interrupt: whether to re-raise `KeyboardInterrupt`s, or suppress the error and just stop iterating. + force_polling: See [Force polling](#force-polling) above. + poll_delay_ms: delay between polling for changes, only used if `force_polling=True`. + recursive: if `True`, watch for changes in sub-directories recursively, otherwise watch only for changes in the + top-level directory, default is `True`. + ignore_permission_denied: if `True`, will ignore permission denied errors, otherwise will raise them by default. + Setting the `WATCHFILES_IGNORE_PERMISSION_DENIED` environment variable will set this value too. + + Yields: + The generator yields sets of [`FileChange`][watchfiles.main.FileChange]s. + + ```py title="Example of watch usage" + from watchfiles import watch + + for changes in watch('./first/dir', './second/dir', raise_interrupt=False): + print(changes) + ``` + """ + force_polling = _default_force_polling(force_polling) + ignore_permission_denied = _default_ignore_permission_denied(ignore_permission_denied) + debug = _default_debug(debug) + with RustNotify( + [str(p) for p in paths], debug, force_polling, poll_delay_ms, recursive, ignore_permission_denied + ) as watcher: + while True: + raw_changes = watcher.watch(debounce, step, rust_timeout, stop_event) + if raw_changes == 'timeout': + if yield_on_timeout: + yield set() + else: + logger.debug('rust notify timeout, continuing') + elif raw_changes == 'signal': + if raise_interrupt: + raise KeyboardInterrupt + else: + logger.warning('KeyboardInterrupt caught, stopping watch') + return + elif raw_changes == 'stop': + return + else: + changes = _prep_changes(raw_changes, watch_filter) + if changes: + _log_changes(changes) + yield changes + else: + logger.debug('all changes filtered out, raw_changes=%s', raw_changes) + + +async def awatch( # C901 + *paths: Union[Path, str], + watch_filter: Optional[Callable[[Change, str], bool]] = DefaultFilter(), + debounce: int = 1_600, + step: int = 50, + stop_event: Optional['AnyEvent'] = None, + rust_timeout: Optional[int] = None, + yield_on_timeout: bool = False, + debug: Optional[bool] = None, + raise_interrupt: Optional[bool] = None, + force_polling: Optional[bool] = None, + poll_delay_ms: int = 300, + recursive: bool = True, + ignore_permission_denied: Optional[bool] = None, +) -> AsyncGenerator[Set[FileChange], None]: + """ + Asynchronous equivalent of [`watch`][watchfiles.watch] using threads to wait for changes. + Arguments match those of [`watch`][watchfiles.watch] except `stop_event`. + + All async methods use [anyio](https://anyio.readthedocs.io/en/latest/) to run the event loop. + + Unlike [`watch`][watchfiles.watch] `KeyboardInterrupt` cannot be suppressed by `awatch` so they need to be caught + where `asyncio.run` or equivalent is called. 
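+
+    Because anyio is used internally, the generator also runs under trio; a sketch
+    assuming trio is installed:
+
+    ```py
+    import anyio
+    from watchfiles import awatch
+
+    async def main():
+        async for changes in awatch('.'):
+            print(changes)
+
+    anyio.run(main, backend='trio')
+    ```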
+ + Args: + *paths: filesystem paths to watch. + watch_filter: matches the same argument of [`watch`][watchfiles.watch]. + debounce: matches the same argument of [`watch`][watchfiles.watch]. + step: matches the same argument of [`watch`][watchfiles.watch]. + stop_event: `anyio.Event` which can be used to stop iteration, see example below. + rust_timeout: matches the same argument of [`watch`][watchfiles.watch], except that `None` means + use `1_000` on Windows and `5_000` on other platforms thus helping with exiting on `Ctrl+C` on Windows, + see [#110](https://github.com/samuelcolvin/watchfiles/issues/110). + yield_on_timeout: matches the same argument of [`watch`][watchfiles.watch]. + debug: matches the same argument of [`watch`][watchfiles.watch]. + raise_interrupt: This is deprecated, `KeyboardInterrupt` will cause this coroutine to be cancelled and then + be raised by the top level `asyncio.run` call or equivalent, and should be caught there. + See [#136](https://github.com/samuelcolvin/watchfiles/issues/136) + force_polling: if true, always use polling instead of file system notifications, default is `None` where + `force_polling` is set to `True` if the `WATCHFILES_FORCE_POLLING` environment variable exists. + poll_delay_ms: delay between polling for changes, only used if `force_polling=True`. + recursive: if `True`, watch for changes in sub-directories recursively, otherwise watch only for changes in the + top-level directory, default is `True`. + ignore_permission_denied: if `True`, will ignore permission denied errors, otherwise will raise them by default. + Setting the `WATCHFILES_IGNORE_PERMISSION_DENIED` environment variable will set this value too. + + Yields: + The generator yields sets of [`FileChange`][watchfiles.main.FileChange]s. + + ```py title="Example of awatch usage" + import asyncio + from watchfiles import awatch + + async def main(): + async for changes in awatch('./first/dir', './second/dir'): + print(changes) + + if __name__ == '__main__': + try: + asyncio.run(main()) + except KeyboardInterrupt: + print('stopped via KeyboardInterrupt') + ``` + + ```py title="Example of awatch usage with a stop event" + import asyncio + from watchfiles import awatch + + async def main(): + stop_event = asyncio.Event() + + async def stop_soon(): + await asyncio.sleep(3) + stop_event.set() + + stop_soon_task = asyncio.create_task(stop_soon()) + + async for changes in awatch('/path/to/dir', stop_event=stop_event): + print(changes) + + # cleanup by awaiting the (now complete) stop_soon_task + await stop_soon_task + + asyncio.run(main()) + ``` + """ + if raise_interrupt is not None: + warnings.warn( + 'raise_interrupt is deprecated, KeyboardInterrupt will cause this coroutine to be cancelled and then ' + 'be raised by the top level asyncio.run call or equivalent, and should be caught there. 
See #136.', + DeprecationWarning, + ) + + if stop_event is None: + stop_event_: 'AnyEvent' = anyio.Event() + else: + stop_event_ = stop_event + + force_polling = _default_force_polling(force_polling) + ignore_permission_denied = _default_ignore_permission_denied(ignore_permission_denied) + debug = _default_debug(debug) + with RustNotify( + [str(p) for p in paths], debug, force_polling, poll_delay_ms, recursive, ignore_permission_denied + ) as watcher: + timeout = _calc_async_timeout(rust_timeout) + CancelledError = anyio.get_cancelled_exc_class() + + while True: + async with anyio.create_task_group() as tg: + try: + raw_changes = await anyio.to_thread.run_sync(watcher.watch, debounce, step, timeout, stop_event_) + except (CancelledError, KeyboardInterrupt): + stop_event_.set() + # suppressing KeyboardInterrupt wouldn't stop it getting raised by the top level asyncio.run call + raise + tg.cancel_scope.cancel() + + if raw_changes == 'timeout': + if yield_on_timeout: + yield set() + else: + logger.debug('rust notify timeout, continuing') + elif raw_changes == 'stop': + return + elif raw_changes == 'signal': + # in theory the watch thread should never get a signal + raise RuntimeError('watch thread unexpectedly received a signal') + else: + changes = _prep_changes(raw_changes, watch_filter) + if changes: + _log_changes(changes) + yield changes + else: + logger.debug('all changes filtered out, raw_changes=%s', raw_changes) + + +def _prep_changes( + raw_changes: Set[Tuple[int, str]], watch_filter: Optional[Callable[[Change, str], bool]] +) -> Set[FileChange]: + # if we wanted to be really snazzy, we could move this into rust + changes = {(Change(change), path) for change, path in raw_changes} + if watch_filter: + changes = {c for c in changes if watch_filter(c[0], c[1])} + return changes + + +def _log_changes(changes: Set[FileChange]) -> None: + if logger.isEnabledFor(logging.INFO): # pragma: no branch + count = len(changes) + plural = '' if count == 1 else 's' + if logger.isEnabledFor(logging.DEBUG): + logger.debug('%d change%s detected: %s', count, plural, changes) + else: + logger.info('%d change%s detected', count, plural) + + +def _calc_async_timeout(timeout: Optional[int]) -> int: + """ + see https://github.com/samuelcolvin/watchfiles/issues/110 + """ + if timeout is None: + if sys.platform == 'win32': + return 1_000 + else: + return 5_000 + else: + return timeout + + +def _default_force_polling(force_polling: Optional[bool]) -> bool: + """ + See docstring for `watch` above for details. + + See samuelcolvin/watchfiles#167 and samuelcolvin/watchfiles#187 for discussion and rationale. + """ + if force_polling is not None: + return force_polling + env_var = os.getenv('WATCHFILES_FORCE_POLLING') + if env_var: + return env_var.lower() not in {'false', 'disable', 'disabled'} + else: + return _auto_force_polling() + + +def _default_debug(debug: Optional[bool]) -> bool: + if debug is not None: + return debug + env_var = os.getenv('WATCHFILES_DEBUG') + return bool(env_var) + + +def _auto_force_polling() -> bool: + """ + Whether to auto-enable force polling, it should be enabled automatically only on WSL. + + See samuelcolvin/watchfiles#187 for discussion. 
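+
+    For example, on WSL `platform.uname().release` typically contains
+    `microsoft-standard`, which is what the check below looks for; setting
+    `WATCHFILES_FORCE_POLLING=false` (or `disable`/`disabled`) overrides the
+    auto-detection entirely.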
+ """ + import platform + + uname = platform.uname() + return 'microsoft-standard' in uname.release.lower() and uname.system.lower() == 'linux' + + +def _default_ignore_permission_denied(ignore_permission_denied: Optional[bool]) -> bool: + if ignore_permission_denied is not None: + return ignore_permission_denied + env_var = os.getenv('WATCHFILES_IGNORE_PERMISSION_DENIED') + return bool(env_var) diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/py.typed b/hackaton/lib/python3.12/site-packages/watchfiles/py.typed new file mode 100644 index 0000000..7cd6d6f --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The watchfiles package uses inline types. diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/run.py b/hackaton/lib/python3.12/site-packages/watchfiles/run.py new file mode 100644 index 0000000..cd40358 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles/run.py @@ -0,0 +1,438 @@ +import contextlib +import json +import logging +import os +import re +import shlex +import signal +import subprocess +import sys +from importlib import import_module +from multiprocessing import get_context +from multiprocessing.context import SpawnProcess +from pathlib import Path +from time import sleep +from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Union + +import anyio + +from .filters import DefaultFilter +from .main import Change, FileChange, awatch, watch + +if TYPE_CHECKING: + from typing import Literal + +__all__ = 'run_process', 'arun_process', 'detect_target_type', 'import_string' +logger = logging.getLogger('watchfiles.main') + + +def run_process( + *paths: Union[Path, str], + target: Union[str, Callable[..., Any]], + args: Tuple[Any, ...] = (), + kwargs: Optional[Dict[str, Any]] = None, + target_type: "Literal['function', 'command', 'auto']" = 'auto', + callback: Optional[Callable[[Set[FileChange]], None]] = None, + watch_filter: Optional[Callable[[Change, str], bool]] = DefaultFilter(), + grace_period: float = 0, + debounce: int = 1_600, + step: int = 50, + debug: Optional[bool] = None, + sigint_timeout: int = 5, + sigkill_timeout: int = 1, + recursive: bool = True, + ignore_permission_denied: bool = False, +) -> int: + """ + Run a process and restart it upon file changes. + + `run_process` can work in two ways: + + * Using `multiprocessing.Process` † to run a python function + * Or, using `subprocess.Popen` to run a command + + !!! note + + **†** technically `multiprocessing.get_context('spawn').Process` to avoid forking and improve + code reload/import. + + Internally, `run_process` uses [`watch`][watchfiles.watch] with `raise_interrupt=False` so the function + exits cleanly upon `Ctrl+C`. + + Args: + *paths: matches the same argument of [`watch`][watchfiles.watch] + target: function or command to run + args: arguments to pass to `target`, only used if `target` is a function + kwargs: keyword arguments to pass to `target`, only used if `target` is a function + target_type: type of target. Can be `'function'`, `'command'`, or `'auto'` in which case + [`detect_target_type`][watchfiles.run.detect_target_type] is used to determine the type. 
+        callback: function to call on each reload, the function should accept a set of changes as the sole argument
+        watch_filter: matches the same argument of [`watch`][watchfiles.watch]
+        grace_period: number of seconds after the process is started before watching for changes
+        debounce: matches the same argument of [`watch`][watchfiles.watch]
+        step: matches the same argument of [`watch`][watchfiles.watch]
+        debug: matches the same argument of [`watch`][watchfiles.watch]
+        sigint_timeout: the number of seconds to wait after sending sigint before sending sigkill
+        sigkill_timeout: the number of seconds to wait after sending sigkill before raising an exception
+        recursive: matches the same argument of [`watch`][watchfiles.watch]
+
+    Returns:
+        number of times the function was reloaded.
+
+    ```py title="Example of run_process running a function"
+    from watchfiles import run_process
+
+    def callback(changes):
+        print('changes detected:', changes)
+
+    def foobar(a, b):
+        print('foobar called with:', a, b)
+
+    if __name__ == '__main__':
+        run_process('./path/to/dir', target=foobar, args=(1, 2), callback=callback)
+    ```
+
+    As well as using a `callback` function, changes can be accessed from within the target function,
+    using the `WATCHFILES_CHANGES` environment variable.
+
+    ```py title="Example of run_process accessing changes"
+    import json
+    import os
+
+    from watchfiles import run_process
+
+    def foobar(a, b, c):
+        # changes will be an empty list "[]" the first time the function is called
+        changes = os.getenv('WATCHFILES_CHANGES')
+        changes = json.loads(changes)
+        print('foobar called due to changes:', changes)
+
+    if __name__ == '__main__':
+        run_process('./path/to/dir', target=foobar, args=(1, 2, 3))
+    ```
+
+    When the target is a command, `WATCHFILES_CHANGES` can likewise be used
+    to access changes.
+
+    ```bash title="example.sh"
+    echo "changes: ${WATCHFILES_CHANGES}"
+    ```
+
+    ```py title="Example of run_process running a command"
+    from watchfiles import run_process
+
+    if __name__ == '__main__':
+        run_process('.', target='./example.sh')
+    ```
+    """
+    if target_type == 'auto':
+        target_type = detect_target_type(target)
+
+    logger.debug('running "%s" as %s', target, target_type)
+    catch_sigterm()
+    process = start_process(target, target_type, args, kwargs)
+    reloads = 0
+
+    if grace_period:
+        logger.debug('sleeping for %s seconds before watching for changes', grace_period)
+        sleep(grace_period)
+
+    try:
+        for changes in watch(
+            *paths,
+            watch_filter=watch_filter,
+            debounce=debounce,
+            step=step,
+            debug=debug,
+            raise_interrupt=False,
+            recursive=recursive,
+            ignore_permission_denied=ignore_permission_denied,
+        ):
+            callback and callback(changes)
+            process.stop(sigint_timeout=sigint_timeout, sigkill_timeout=sigkill_timeout)
+            process = start_process(target, target_type, args, kwargs, changes)
+            reloads += 1
+    finally:
+        process.stop()
+    return reloads
+
+
+async def arun_process(
+    *paths: Union[Path, str],
+    target: Union[str, Callable[..., Any]],
+    args: Tuple[Any, ...]
= (), + kwargs: Optional[Dict[str, Any]] = None, + target_type: "Literal['function', 'command', 'auto']" = 'auto', + callback: Optional[Callable[[Set[FileChange]], Any]] = None, + watch_filter: Optional[Callable[[Change, str], bool]] = DefaultFilter(), + grace_period: float = 0, + debounce: int = 1_600, + step: int = 50, + debug: Optional[bool] = None, + recursive: bool = True, + ignore_permission_denied: bool = False, +) -> int: + """ + Async equivalent of [`run_process`][watchfiles.run_process], all arguments match those of `run_process` except + `callback` which can be a coroutine. + + Starting and stopping the process and watching for changes is done in a separate thread. + + As with `run_process`, internally `arun_process` uses [`awatch`][watchfiles.awatch], however `KeyboardInterrupt` + cannot be caught and suppressed in `awatch` so these errors need to be caught separately, see below. + + ```py title="Example of arun_process usage" + import asyncio + from watchfiles import arun_process + + async def callback(changes): + await asyncio.sleep(0.1) + print('changes detected:', changes) + + def foobar(a, b): + print('foobar called with:', a, b) + + async def main(): + await arun_process('.', target=foobar, args=(1, 2), callback=callback) + + if __name__ == '__main__': + try: + asyncio.run(main()) + except KeyboardInterrupt: + print('stopped via KeyboardInterrupt') + ``` + """ + import inspect + + if target_type == 'auto': + target_type = detect_target_type(target) + + logger.debug('running "%s" as %s', target, target_type) + catch_sigterm() + process = await anyio.to_thread.run_sync(start_process, target, target_type, args, kwargs) + reloads = 0 + + if grace_period: + logger.debug('sleeping for %s seconds before watching for changes', grace_period) + await anyio.sleep(grace_period) + + async for changes in awatch( + *paths, + watch_filter=watch_filter, + debounce=debounce, + step=step, + debug=debug, + recursive=recursive, + ignore_permission_denied=ignore_permission_denied, + ): + if callback is not None: + r = callback(changes) + if inspect.isawaitable(r): + await r + + await anyio.to_thread.run_sync(process.stop) + process = await anyio.to_thread.run_sync(start_process, target, target_type, args, kwargs, changes) + reloads += 1 + await anyio.to_thread.run_sync(process.stop) + return reloads + + +# Use spawn context to make sure code run in subprocess +# does not reuse imported modules in main process/context +spawn_context = get_context('spawn') + + +def split_cmd(cmd: str) -> List[str]: + import platform + + posix = platform.uname().system.lower() != 'windows' + return shlex.split(cmd, posix=posix) + + +def start_process( + target: Union[str, Callable[..., Any]], + target_type: "Literal['function', 'command']", + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]], + changes: Optional[Set[FileChange]] = None, +) -> 'CombinedProcess': + if changes is None: + changes_env_var = '[]' + else: + changes_env_var = json.dumps([[c.raw_str(), p] for c, p in changes]) + + os.environ['WATCHFILES_CHANGES'] = changes_env_var + + process: 'Union[SpawnProcess, subprocess.Popen[bytes]]' + if target_type == 'function': + kwargs = kwargs or {} + if isinstance(target, str): + args = target, get_tty_path(), args, kwargs + target_ = run_function + kwargs = {} + else: + target_ = target + + process = spawn_context.Process(target=target_, args=args, kwargs=kwargs) + process.start() + else: + if args or kwargs: + logger.warning('ignoring args and kwargs for "command" target') + + assert 
isinstance(target, str), 'target must be a string to run as a command'
+        popen_args = split_cmd(target)
+        process = subprocess.Popen(popen_args)
+    return CombinedProcess(process)
+
+
+def detect_target_type(target: Union[str, Callable[..., Any]]) -> "Literal['function', 'command']":
+    """
+    Used by [`run_process`][watchfiles.run_process], [`arun_process`][watchfiles.arun_process]
+    and indirectly the CLI to determine the target type when `target_type` is `auto`.
+
+    Detects the target type - either `function` or `command`. This method is only called with `target_type='auto'`.
+
+    The following logic is employed:
+
+    * If `target` is not a string, it is assumed to be a function
+    * If `target` ends with `.py` or `.sh`, it is assumed to be a command
+    * Otherwise, the target is assumed to be a function if it matches the regex `[a-zA-Z0-9_]+(\\.[a-zA-Z0-9_]+)+`
+
+    If this logic does not work for you, specify the target type explicitly using the `target_type` function argument
+    or `--target-type` command line argument.
+
+    Args:
+        target: The target value
+
+    Returns:
+        either `'function'` or `'command'`
+    """
+    if not isinstance(target, str):
+        return 'function'
+    elif target.endswith(('.py', '.sh')):
+        return 'command'
+    elif re.fullmatch(r'[a-zA-Z0-9_]+(\.[a-zA-Z0-9_]+)+', target):
+        return 'function'
+    else:
+        return 'command'
+
+
+class CombinedProcess:
+    def __init__(self, p: 'Union[SpawnProcess, subprocess.Popen[bytes]]'):
+        self._p = p
+        assert self.pid is not None, 'process not yet spawned'
+
+    def stop(self, sigint_timeout: int = 5, sigkill_timeout: int = 1) -> None:
+        os.environ.pop('WATCHFILES_CHANGES', None)
+        if self.is_alive():
+            logger.debug('stopping process...')
+
+            os.kill(self.pid, signal.SIGINT)
+
+            try:
+                self.join(sigint_timeout)
+            except subprocess.TimeoutExpired:
+                # Capture this exception to allow the self.exitcode to be reached.
+                # This will allow the SIGKILL to be sent, otherwise it is swallowed up.
+                logger.warning('SIGINT timed out after %r seconds', sigint_timeout)
+                pass
+
+            if self.exitcode is None:
+                logger.warning('process has not terminated, sending SIGKILL')
+                os.kill(self.pid, signal.SIGKILL)
+                self.join(sigkill_timeout)
+            else:
+                logger.debug('process stopped')
+        else:
+            logger.warning('process already dead, exit code: %d', self.exitcode)
+
+    def is_alive(self) -> bool:
+        if isinstance(self._p, SpawnProcess):
+            return self._p.is_alive()
+        else:
+            return self._p.poll() is None
+
+    @property
+    def pid(self) -> int:
+        # we check the process has been spawned when CombinedProcess is initialised
+        return self._p.pid  # type: ignore[return-value]
+
+    def join(self, timeout: int) -> None:
+        if isinstance(self._p, SpawnProcess):
+            self._p.join(timeout)
+        else:
+            self._p.wait(timeout)
+
+    @property
+    def exitcode(self) -> Optional[int]:
+        if isinstance(self._p, SpawnProcess):
+            return self._p.exitcode
+        else:
+            return self._p.returncode
+
+
+def run_function(function: str, tty_path: Optional[str], args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> None:
+    with set_tty(tty_path):
+        func = import_string(function)
+        func(*args, **kwargs)
+
+
+def import_string(dotted_path: str) -> Any:
+    """
+    Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the
+    last name in the path. Raise ImportError if the import fails.
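+
+    For example (a sketch; any importable dotted path works):
+
+    ```py
+    from watchfiles.run import import_string
+
+    loads = import_string('json.loads')
+    assert loads('{"a": 1}') == {'a': 1}
+    ```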
+ """ + try: + module_path, class_name = dotted_path.strip(' ').rsplit('.', 1) + except ValueError as e: + raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from e + + module = import_module(module_path) + try: + return getattr(module, class_name) + except AttributeError as e: + raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute') from e + + +def get_tty_path() -> Optional[str]: # pragma: no cover + """ + Return the path to the current TTY, if any. + + Virtually impossible to test in pytest, hence no cover. + """ + try: + return os.ttyname(sys.stdin.fileno()) + except OSError: + # fileno() always fails with pytest + return '/dev/tty' + except AttributeError: + # on Windows. No idea of a better solution + return None + + +@contextlib.contextmanager +def set_tty(tty_path: Optional[str]) -> Generator[None, None, None]: + if tty_path: + try: + with open(tty_path) as tty: # pragma: no cover + sys.stdin = tty + yield + except OSError: + # eg. "No such device or address: '/dev/tty'", see https://github.com/samuelcolvin/watchfiles/issues/40 + yield + else: + # currently on windows tty_path is None and there's nothing we can do here + yield + + +def raise_keyboard_interrupt(signum: int, _frame: Any) -> None: # pragma: no cover + logger.warning('received signal %s, raising KeyboardInterrupt', signal.Signals(signum)) + raise KeyboardInterrupt + + +def catch_sigterm() -> None: + """ + Catch SIGTERM and raise KeyboardInterrupt instead. This means watchfiles will stop quickly + on `docker compose stop` and other cases where SIGTERM is sent. + + Without this the watchfiles process will be killed while a running process will continue uninterrupted. + """ + logger.debug('registering handler for SIGTERM on watchfiles process %d', os.getpid()) + signal.signal(signal.SIGTERM, raise_keyboard_interrupt) diff --git a/hackaton/lib/python3.12/site-packages/watchfiles/version.py b/hackaton/lib/python3.12/site-packages/watchfiles/version.py new file mode 100644 index 0000000..f55721f --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/watchfiles/version.py @@ -0,0 +1,5 @@ +from ._rust_notify import __version__ + +__all__ = ('VERSION',) + +VERSION = __version__ diff --git a/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/INSTALLER b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/LICENSE b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/LICENSE new file mode 100644 index 0000000..5d61ece --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) Aymeric Augustin and contributors + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/METADATA b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/METADATA new file mode 100644 index 0000000..be70919 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/METADATA @@ -0,0 +1,177 @@ +Metadata-Version: 2.1 +Name: websockets +Version: 13.1 +Summary: An implementation of the WebSocket Protocol (RFC 6455 & 7692) +Author-email: Aymeric Augustin +License: BSD-3-Clause +Project-URL: Homepage, https://github.com/python-websockets/websockets +Project-URL: Changelog, https://websockets.readthedocs.io/en/stable/project/changelog.html +Project-URL: Documentation, https://websockets.readthedocs.io/ +Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-websockets?utm_source=pypi-websockets&utm_medium=referral&utm_campaign=readme +Project-URL: Tracker, https://github.com/python-websockets/websockets/issues +Keywords: WebSocket +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Requires-Python: >=3.8 +Description-Content-Type: text/x-rst +License-File: LICENSE + +.. image:: logo/horizontal.svg + :width: 480px + :alt: websockets + +|licence| |version| |pyversions| |tests| |docs| |openssf| + +.. |licence| image:: https://img.shields.io/pypi/l/websockets.svg + :target: https://pypi.python.org/pypi/websockets + +.. |version| image:: https://img.shields.io/pypi/v/websockets.svg + :target: https://pypi.python.org/pypi/websockets + +.. |pyversions| image:: https://img.shields.io/pypi/pyversions/websockets.svg + :target: https://pypi.python.org/pypi/websockets + +.. |tests| image:: https://img.shields.io/github/checks-status/python-websockets/websockets/main?label=tests + :target: https://github.com/python-websockets/websockets/actions/workflows/tests.yml + +.. |docs| image:: https://img.shields.io/readthedocs/websockets.svg + :target: https://websockets.readthedocs.io/ + +.. 
|openssf| image:: https://bestpractices.coreinfrastructure.org/projects/6475/badge + :target: https://bestpractices.coreinfrastructure.org/projects/6475 + +What is ``websockets``? +----------------------- + +websockets is a library for building WebSocket_ servers and clients in Python +with a focus on correctness, simplicity, robustness, and performance. + +.. _WebSocket: https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API + +Built on top of ``asyncio``, Python's standard asynchronous I/O framework, the +default implementation provides an elegant coroutine-based API. + +An implementation on top of ``threading`` and a Sans-I/O implementation are also +available. + +`Documentation is available on Read the Docs. `_ + +.. copy-pasted because GitHub doesn't support the include directive + +Here's an echo server with the ``asyncio`` API: + +.. code:: python + + #!/usr/bin/env python + + import asyncio + from websockets.server import serve + + async def echo(websocket): + async for message in websocket: + await websocket.send(message) + + async def main(): + async with serve(echo, "localhost", 8765): + await asyncio.get_running_loop().create_future() # run forever + + asyncio.run(main()) + +Here's how a client sends and receives messages with the ``threading`` API: + +.. code:: python + + #!/usr/bin/env python + + from websockets.sync.client import connect + + def hello(): + with connect("ws://localhost:8765") as websocket: + websocket.send("Hello world!") + message = websocket.recv() + print(f"Received: {message}") + + hello() + + +Does that look good? + +`Get started with the tutorial! `_ + +Why should I use ``websockets``? +-------------------------------- + +The development of ``websockets`` is shaped by four principles: + +1. **Correctness**: ``websockets`` is heavily tested for compliance with + :rfc:`6455`. Continuous integration fails under 100% branch coverage. + +2. **Simplicity**: all you need to understand is ``msg = await ws.recv()`` and + ``await ws.send(msg)``. ``websockets`` takes care of managing connections + so you can focus on your application. + +3. **Robustness**: ``websockets`` is built for production. For example, it was + the only library to `handle backpressure correctly`_ before the issue + became widely known in the Python community. + +4. **Performance**: memory usage is optimized and configurable. A C extension + accelerates expensive operations. It's pre-compiled for Linux, macOS and + Windows and packaged in the wheel format for each system and Python version. + +Documentation is a first class concern in the project. Head over to `Read the +Docs`_ and see for yourself. + +.. _Read the Docs: https://websockets.readthedocs.io/ +.. _handle backpressure correctly: https://vorpus.org/blog/some-thoughts-on-asynchronous-api-design-in-a-post-asyncawait-world/#websocket-servers + +Why shouldn't I use ``websockets``? +----------------------------------- + +* If you prefer callbacks over coroutines: ``websockets`` was created to + provide the best coroutine-based API to manage WebSocket connections in + Python. Pick another library for a callback-based API. + +* If you're looking for a mixed HTTP / WebSocket library: ``websockets`` aims + at being an excellent implementation of :rfc:`6455`: The WebSocket Protocol + and :rfc:`7692`: Compression Extensions for WebSocket. Its support for HTTP + is minimal — just enough for an HTTP health check. 
+ + If you want to do both in the same server, look at HTTP frameworks that + build on top of ``websockets`` to support WebSocket connections, like + Sanic_. + +.. _Sanic: https://sanicframework.org/en/ + +What else? +---------- + +Bug reports, patches and suggestions are welcome! + +To report a security vulnerability, please use the `Tidelift security +contact`_. Tidelift will coordinate the fix and disclosure. + +.. _Tidelift security contact: https://tidelift.com/security + +For anything else, please open an issue_ or send a `pull request`_. + +.. _issue: https://github.com/python-websockets/websockets/issues/new +.. _pull request: https://github.com/python-websockets/websockets/compare/ + +Participants must uphold the `Contributor Covenant code of conduct`_. + +.. _Contributor Covenant code of conduct: https://github.com/python-websockets/websockets/blob/main/CODE_OF_CONDUCT.md + +``websockets`` is released under the `BSD license`_. + +.. _BSD license: https://github.com/python-websockets/websockets/blob/main/LICENSE diff --git a/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/RECORD b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/RECORD new file mode 100644 index 0000000..3ad97bd --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/RECORD @@ -0,0 +1,98 @@ +websockets-13.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +websockets-13.1.dist-info/LICENSE,sha256=PWoMBQ2L7FL6utUC5F-yW9ArytvXDeo01Ee2oP9Obag,1514 +websockets-13.1.dist-info/METADATA,sha256=dYWGin2sQircUMT-_HkCtQkc5_LTzdQ7BLwIGDTHbbI,6777 +websockets-13.1.dist-info/RECORD,, +websockets-13.1.dist-info/WHEEL,sha256=lWey-nzGdePMz7fsIJ1fIMVKiFg5IkOcUVqkuIpdbws,109 +websockets-13.1.dist-info/top_level.txt,sha256=CMpdKklxKsvZgCgyltxUWOHibZXZ1uYIVpca9xsQ8Hk,11 +websockets/__init__.py,sha256=UlYOZWjPPdgEtBFq4CP5t7Kd1Jjq-iMJT62Ya9ImDSo,5936 +websockets/__main__.py,sha256=q6tBA72COhz7NUkuP_VG9IVypJjOexx2Oi7qkKNxneg,4756 +websockets/__pycache__/__init__.cpython-312.pyc,, +websockets/__pycache__/__main__.cpython-312.pyc,, +websockets/__pycache__/auth.cpython-312.pyc,, +websockets/__pycache__/client.cpython-312.pyc,, +websockets/__pycache__/connection.cpython-312.pyc,, +websockets/__pycache__/datastructures.cpython-312.pyc,, +websockets/__pycache__/exceptions.cpython-312.pyc,, +websockets/__pycache__/frames.cpython-312.pyc,, +websockets/__pycache__/headers.cpython-312.pyc,, +websockets/__pycache__/http.cpython-312.pyc,, +websockets/__pycache__/http11.cpython-312.pyc,, +websockets/__pycache__/imports.cpython-312.pyc,, +websockets/__pycache__/protocol.cpython-312.pyc,, +websockets/__pycache__/server.cpython-312.pyc,, +websockets/__pycache__/streams.cpython-312.pyc,, +websockets/__pycache__/typing.cpython-312.pyc,, +websockets/__pycache__/uri.cpython-312.pyc,, +websockets/__pycache__/utils.cpython-312.pyc,, +websockets/__pycache__/version.cpython-312.pyc,, +websockets/asyncio/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +websockets/asyncio/__pycache__/__init__.cpython-312.pyc,, +websockets/asyncio/__pycache__/async_timeout.cpython-312.pyc,, +websockets/asyncio/__pycache__/client.cpython-312.pyc,, +websockets/asyncio/__pycache__/compatibility.cpython-312.pyc,, +websockets/asyncio/__pycache__/connection.cpython-312.pyc,, +websockets/asyncio/__pycache__/messages.cpython-312.pyc,, +websockets/asyncio/__pycache__/server.cpython-312.pyc,, +websockets/asyncio/async_timeout.py,sha256=N-6Mubyiaoh66PAXGvCzhgxCM-7V2XiRnH32Xi6J6TE,8971 
+websockets/asyncio/client.py,sha256=Kx9L-AYQUlMRAyo0d2DjuebggcM-rogx3JB26rEebY4,21700 +websockets/asyncio/compatibility.py,sha256=gkenDDhzNbm6_iXV5Edvbvp6uHZYdrTvGNjt8P_JtyQ,786 +websockets/asyncio/connection.py,sha256=sxX1WTz2iVxCsUvLJUoogJT9SdHsHU4ut2PuSIbxVs4,44475 +websockets/asyncio/messages.py,sha256=-sS9JCa4-aFVSv0sPJd_VtGcoADj8mE0sMxfsqW-rQw,9854 +websockets/asyncio/server.py,sha256=In45P1Ng2gznGMbnwuz3brlIAsZkSel0ScshrJZSMw8,36548 +websockets/auth.py,sha256=pCeunT3V2AdwRt_Tpq9TrkdGY7qUlDHIEqeggj5yQFk,262 +websockets/client.py,sha256=cc8y1I2Firs1JRXCfgD4j2JWnneYAuQSpGWNjrhkqFY,13541 +websockets/connection.py,sha256=OLiMVkNd25_86sB8Q7CrCwBoXy9nA0OCgdgLRA8WUR8,323 +websockets/datastructures.py,sha256=s5Rkipz4n15HSZsOrs64CoCs-_3oSBCgpe9uPvztDkY,5677 +websockets/exceptions.py,sha256=b2-QiL1pszljREQQCzbPE1Fv7-Xb-uwso2Zt6LLD10A,10594 +websockets/extensions/__init__.py,sha256=QkZsxaJVllVSp1uhdD5uPGibdbx_091GrVVfS5LXcpw,98 +websockets/extensions/__pycache__/__init__.cpython-312.pyc,, +websockets/extensions/__pycache__/base.cpython-312.pyc,, +websockets/extensions/__pycache__/permessage_deflate.cpython-312.pyc,, +websockets/extensions/base.py,sha256=jsSJnO47L2VxYzx0cZ_LLQcAyUudSDgJEtKN247H-38,2890 +websockets/extensions/permessage_deflate.py,sha256=JR9s7pvAJv2zWRUfOLysOtAiO-ovgRMqnSUpb92gohI,24661 +websockets/frames.py,sha256=H-4ULsevYdna_CjalVASRPlh2Z54NoQat_vq8P4cVfc,12765 +websockets/headers.py,sha256=9OHHZvaj4hXrofi0HuJFNYJaE0yRoPmmrBYxMaDuCTs,15931 +websockets/http.py,sha256=eWitbqWAmHeqYK4OF3JLRC4lWwI1OIeft7oY3OobXvc,481 +websockets/http11.py,sha256=-TNxOVVLr0050-0Ac3jOlWt5G9HAfkHZrt8dqoto9bs,13376 +websockets/imports.py,sha256=TNONfYXO1UPExiwCVMgmg77fH5b4nyNAKcqtTg0gO2I,2768 +websockets/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +websockets/legacy/__pycache__/__init__.cpython-312.pyc,, +websockets/legacy/__pycache__/auth.cpython-312.pyc,, +websockets/legacy/__pycache__/client.cpython-312.pyc,, +websockets/legacy/__pycache__/exceptions.cpython-312.pyc,, +websockets/legacy/__pycache__/framing.cpython-312.pyc,, +websockets/legacy/__pycache__/handshake.cpython-312.pyc,, +websockets/legacy/__pycache__/http.cpython-312.pyc,, +websockets/legacy/__pycache__/protocol.cpython-312.pyc,, +websockets/legacy/__pycache__/server.cpython-312.pyc,, +websockets/legacy/auth.py,sha256=UdK0eZg1TjMGY6iEVRbBn51M9AjpSyRv2lJbvuuI6aA,6567 +websockets/legacy/client.py,sha256=iuyFib2kX5ybK9vLVpqJRNJHa4BuA0u5MLyoNnartY4,26706 +websockets/legacy/exceptions.py,sha256=DbSHBKcDEoYoUeCxURo0cnH8PyCCKYzkTboP_tOtsxw,1967 +websockets/legacy/framing.py,sha256=ALEDiBNq17FUqNEe5LHxkPxWoY6tPwffgGFiHMdnnIs,6371 +websockets/legacy/handshake.py,sha256=2Nzr5AN2xvDC5EdNP-kB3lOcrAaUNlYuj_-hr_jv7pM,5285 +websockets/legacy/http.py,sha256=cOCQmDWhIKQmm8UWGXPW7CDZg03wjogCsb0LP9oetNQ,7061 +websockets/legacy/protocol.py,sha256=Rbk88lnbghWpcEBT-TuTtAGqDy9OA7VsUFEMUcv95RM,63681 +websockets/legacy/server.py,sha256=lb26Vm_y7biVVYLyVzG9R1BiaLmuS3TrQh-LesjO4Ss,45318 +websockets/protocol.py,sha256=yl1j9ecLShF0iTRALOTzFfq0KmW5XO74Mtk0baVkvo0,25512 +websockets/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +websockets/server.py,sha256=BVoC433LZUgKVndtaYPrndB7uf_FTuG7MXrM9QHJEzk,21275 +websockets/speedups.c,sha256=j-damnT02MKRoYw8MtTT45qLGX6z6TnriqhTkyfcNZE,5767 +websockets/speedups.cpython-312-darwin.so,sha256=q0PpI3dD2HxLI6cjVe58rmw0qA5CE0W3NAcFBFpu1w8,51376 +websockets/speedups.pyi,sha256=NikZ3sAxs9Z2uWH_ZvctvMJUBbsHeC2D1L954EVSwJc,55 
+websockets/streams.py,sha256=3K3FcgTcXon-51P0sVyz0G4J-H51L82SVMS--W-gl6g,4038 +websockets/sync/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +websockets/sync/__pycache__/__init__.cpython-312.pyc,, +websockets/sync/__pycache__/client.cpython-312.pyc,, +websockets/sync/__pycache__/connection.cpython-312.pyc,, +websockets/sync/__pycache__/messages.cpython-312.pyc,, +websockets/sync/__pycache__/server.cpython-312.pyc,, +websockets/sync/__pycache__/utils.cpython-312.pyc,, +websockets/sync/client.py,sha256=QWs2wYU7S8--CZgFYWUliWjSYX5zrJEFQ6_gEcrW1sA,11372 +websockets/sync/connection.py,sha256=Ve2aW760xPz8nXU56TuL7M3qV1THYmZZcfoS_0Wwh0c,30684 +websockets/sync/messages.py,sha256=K-VHhUERUsS6bOaLgTox4kShnUKt8aPmWgOdqj_4E-Y,9809 +websockets/sync/server.py,sha256=WutnccxDQWJNfPsX2WthvDr0QeVn36fUpf0MKmbeXY0,25608 +websockets/sync/utils.py,sha256=TtW-ncYFvJmiSW2gO86ngE2BVsnnBdL-4H88kWNDYbg,1107 +websockets/typing.py,sha256=b9F78aYY-sDNnIgSbvV_ApVBicVJdduLGv5wU0PVB5c,2157 +websockets/uri.py,sha256=1r8dXNEiLcdMrCrzXmsy7DwSHiF3gaOWlmAdoFexOOM,3125 +websockets/utils.py,sha256=ZpH3WJLsQS29Jf5R6lTacxf_hPd8E4zS2JmGyNpg4bA,1150 +websockets/version.py,sha256=M0HSppy6IqnAdAr0McbPGkyCuBlue4Uzigc78cOWHxs,3202 diff --git a/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/WHEEL b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/WHEEL new file mode 100644 index 0000000..b0fbf11 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.1.0) +Root-Is-Purelib: false +Tag: cp312-cp312-macosx_11_0_arm64 + diff --git a/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/top_level.txt b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/top_level.txt new file mode 100644 index 0000000..14774b4 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets-13.1.dist-info/top_level.txt @@ -0,0 +1 @@ +websockets diff --git a/hackaton/lib/python3.12/site-packages/websockets/__init__.py b/hackaton/lib/python3.12/site-packages/websockets/__init__.py new file mode 100644 index 0000000..54591e9 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/__init__.py @@ -0,0 +1,199 @@ +from __future__ import annotations + +import typing + +from .imports import lazy_import +from .version import version as __version__ # noqa: F401 + + +__all__ = [ + # .client + "ClientProtocol", + # .datastructures + "Headers", + "HeadersLike", + "MultipleValuesError", + # .exceptions + "ConcurrencyError", + "ConnectionClosed", + "ConnectionClosedError", + "ConnectionClosedOK", + "DuplicateParameter", + "InvalidHandshake", + "InvalidHeader", + "InvalidHeaderFormat", + "InvalidHeaderValue", + "InvalidOrigin", + "InvalidParameterName", + "InvalidParameterValue", + "InvalidState", + "InvalidStatus", + "InvalidUpgrade", + "InvalidURI", + "NegotiationError", + "PayloadTooBig", + "ProtocolError", + "SecurityError", + "WebSocketException", + "WebSocketProtocolError", + # .legacy.auth + "BasicAuthWebSocketServerProtocol", + "basic_auth_protocol_factory", + # .legacy.client + "WebSocketClientProtocol", + "connect", + "unix_connect", + # .legacy.exceptions + "AbortHandshake", + "InvalidMessage", + "InvalidStatusCode", + "RedirectHandshake", + # .legacy.protocol + "WebSocketCommonProtocol", + # .legacy.server + "WebSocketServer", + "WebSocketServerProtocol", + "broadcast", + "serve", + "unix_serve", + # .server + "ServerProtocol", + # .typing + "Data", + "ExtensionName", + 
"ExtensionParameter", + "LoggerLike", + "StatusLike", + "Origin", + "Subprotocol", +] + +# When type checking, import non-deprecated aliases eagerly. Else, import on demand. +if typing.TYPE_CHECKING: + from .client import ClientProtocol + from .datastructures import Headers, HeadersLike, MultipleValuesError + from .exceptions import ( + ConcurrencyError, + ConnectionClosed, + ConnectionClosedError, + ConnectionClosedOK, + DuplicateParameter, + InvalidHandshake, + InvalidHeader, + InvalidHeaderFormat, + InvalidHeaderValue, + InvalidOrigin, + InvalidParameterName, + InvalidParameterValue, + InvalidState, + InvalidStatus, + InvalidUpgrade, + InvalidURI, + NegotiationError, + PayloadTooBig, + ProtocolError, + SecurityError, + WebSocketException, + WebSocketProtocolError, + ) + from .legacy.auth import ( + BasicAuthWebSocketServerProtocol, + basic_auth_protocol_factory, + ) + from .legacy.client import WebSocketClientProtocol, connect, unix_connect + from .legacy.exceptions import ( + AbortHandshake, + InvalidMessage, + InvalidStatusCode, + RedirectHandshake, + ) + from .legacy.protocol import WebSocketCommonProtocol + from .legacy.server import ( + WebSocketServer, + WebSocketServerProtocol, + broadcast, + serve, + unix_serve, + ) + from .server import ServerProtocol + from .typing import ( + Data, + ExtensionName, + ExtensionParameter, + LoggerLike, + Origin, + StatusLike, + Subprotocol, + ) +else: + lazy_import( + globals(), + aliases={ + # .client + "ClientProtocol": ".client", + # .datastructures + "Headers": ".datastructures", + "HeadersLike": ".datastructures", + "MultipleValuesError": ".datastructures", + # .exceptions + "ConcurrencyError": ".exceptions", + "ConnectionClosed": ".exceptions", + "ConnectionClosedError": ".exceptions", + "ConnectionClosedOK": ".exceptions", + "DuplicateParameter": ".exceptions", + "InvalidHandshake": ".exceptions", + "InvalidHeader": ".exceptions", + "InvalidHeaderFormat": ".exceptions", + "InvalidHeaderValue": ".exceptions", + "InvalidOrigin": ".exceptions", + "InvalidParameterName": ".exceptions", + "InvalidParameterValue": ".exceptions", + "InvalidState": ".exceptions", + "InvalidStatus": ".exceptions", + "InvalidUpgrade": ".exceptions", + "InvalidURI": ".exceptions", + "NegotiationError": ".exceptions", + "PayloadTooBig": ".exceptions", + "ProtocolError": ".exceptions", + "SecurityError": ".exceptions", + "WebSocketException": ".exceptions", + "WebSocketProtocolError": ".exceptions", + # .legacy.auth + "BasicAuthWebSocketServerProtocol": ".legacy.auth", + "basic_auth_protocol_factory": ".legacy.auth", + # .legacy.client + "WebSocketClientProtocol": ".legacy.client", + "connect": ".legacy.client", + "unix_connect": ".legacy.client", + # .legacy.exceptions + "AbortHandshake": ".legacy.exceptions", + "InvalidMessage": ".legacy.exceptions", + "InvalidStatusCode": ".legacy.exceptions", + "RedirectHandshake": ".legacy.exceptions", + # .legacy.protocol + "WebSocketCommonProtocol": ".legacy.protocol", + # .legacy.server + "WebSocketServer": ".legacy.server", + "WebSocketServerProtocol": ".legacy.server", + "broadcast": ".legacy.server", + "serve": ".legacy.server", + "unix_serve": ".legacy.server", + # .server + "ServerProtocol": ".server", + # .typing + "Data": ".typing", + "ExtensionName": ".typing", + "ExtensionParameter": ".typing", + "LoggerLike": ".typing", + "Origin": ".typing", + "StatusLike": ".typing", + "Subprotocol": ".typing", + }, + deprecated_aliases={ + # deprecated in 9.0 - 2021-09-01 + "framing": ".legacy", + "handshake": ".legacy", + 
"parse_uri": ".uri", + "WebSocketURI": ".uri", + }, + ) diff --git a/hackaton/lib/python3.12/site-packages/websockets/__main__.py b/hackaton/lib/python3.12/site-packages/websockets/__main__.py new file mode 100644 index 0000000..8647481 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/__main__.py @@ -0,0 +1,159 @@ +from __future__ import annotations + +import argparse +import os +import signal +import sys +import threading + + +try: + import readline # noqa: F401 +except ImportError: # Windows has no `readline` normally + pass + +from .sync.client import ClientConnection, connect +from .version import version as websockets_version + + +if sys.platform == "win32": + + def win_enable_vt100() -> None: + """ + Enable VT-100 for console output on Windows. + + See also https://github.com/python/cpython/issues/73245. + + """ + import ctypes + + STD_OUTPUT_HANDLE = ctypes.c_uint(-11) + INVALID_HANDLE_VALUE = ctypes.c_uint(-1) + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x004 + + handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE) + if handle == INVALID_HANDLE_VALUE: + raise RuntimeError("unable to obtain stdout handle") + + cur_mode = ctypes.c_uint() + if ctypes.windll.kernel32.GetConsoleMode(handle, ctypes.byref(cur_mode)) == 0: + raise RuntimeError("unable to query current console mode") + + # ctypes ints lack support for the required bit-OR operation. + # Temporarily convert to Py int, do the OR and convert back. + py_int_mode = int.from_bytes(cur_mode, sys.byteorder) + new_mode = ctypes.c_uint(py_int_mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING) + + if ctypes.windll.kernel32.SetConsoleMode(handle, new_mode) == 0: + raise RuntimeError("unable to set console mode") + + +def print_during_input(string: str) -> None: + sys.stdout.write( + # Save cursor position + "\N{ESC}7" + # Add a new line + "\N{LINE FEED}" + # Move cursor up + "\N{ESC}[A" + # Insert blank line, scroll last line down + "\N{ESC}[L" + # Print string in the inserted blank line + f"{string}\N{LINE FEED}" + # Restore cursor position + "\N{ESC}8" + # Move cursor down + "\N{ESC}[B" + ) + sys.stdout.flush() + + +def print_over_input(string: str) -> None: + sys.stdout.write( + # Move cursor to beginning of line + "\N{CARRIAGE RETURN}" + # Delete current line + "\N{ESC}[K" + # Print string + f"{string}\N{LINE FEED}" + ) + sys.stdout.flush() + + +def print_incoming_messages(websocket: ClientConnection, stop: threading.Event) -> None: + for message in websocket: + if isinstance(message, str): + print_during_input("< " + message) + else: + print_during_input("< (binary) " + message.hex()) + if not stop.is_set(): + # When the server closes the connection, raise KeyboardInterrupt + # in the main thread to exit the program. + if sys.platform == "win32": + ctrl_c = signal.CTRL_C_EVENT + else: + ctrl_c = signal.SIGINT + os.kill(os.getpid(), ctrl_c) + + +def main() -> None: + # Parse command line arguments. + parser = argparse.ArgumentParser( + prog="python -m websockets", + description="Interactive WebSocket client.", + add_help=False, + ) + group = parser.add_mutually_exclusive_group() + group.add_argument("--version", action="store_true") + group.add_argument("uri", metavar="", nargs="?") + args = parser.parse_args() + + if args.version: + print(f"websockets {websockets_version}") + return + + if args.uri is None: + parser.error("the following arguments are required: ") + + # If we're on Windows, enable VT100 terminal support. 
+ if sys.platform == "win32": + try: + win_enable_vt100() + except RuntimeError as exc: + sys.stderr.write( + f"Unable to set terminal to VT100 mode. This is only " + f"supported since Win10 anniversary update. Expect " + f"weird symbols on the terminal.\nError: {exc}\n" + ) + sys.stderr.flush() + + try: + websocket = connect(args.uri) + except Exception as exc: + print(f"Failed to connect to {args.uri}: {exc}.") + sys.exit(1) + else: + print(f"Connected to {args.uri}.") + + stop = threading.Event() + + # Start the thread that reads messages from the connection. + thread = threading.Thread(target=print_incoming_messages, args=(websocket, stop)) + thread.start() + + # Read from stdin in the main thread in order to receive signals. + try: + while True: + # Since there's no size limit, put_nowait is identical to put. + message = input("> ") + websocket.send(message) + except (KeyboardInterrupt, EOFError): # ^C, ^D + stop.set() + websocket.close() + print_over_input("Connection closed.") + + thread.join() + + +if __name__ == "__main__": + main() diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..75700fd Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/__main__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 0000000..b225792 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/__main__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/auth.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/auth.cpython-312.pyc new file mode 100644 index 0000000..23c3ea6 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/auth.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/client.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/client.cpython-312.pyc new file mode 100644 index 0000000..67b828f Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/client.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/connection.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/connection.cpython-312.pyc new file mode 100644 index 0000000..4efeefc Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/connection.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/datastructures.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/datastructures.cpython-312.pyc new file mode 100644 index 0000000..2f7b6b5 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/datastructures.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/exceptions.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000..ca8788e Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/exceptions.cpython-312.pyc differ diff --git 
a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/frames.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/frames.cpython-312.pyc new file mode 100644 index 0000000..424a05f Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/frames.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/headers.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/headers.cpython-312.pyc new file mode 100644 index 0000000..3fde56a Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/headers.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/http.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/http.cpython-312.pyc new file mode 100644 index 0000000..b0d33ed Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/http.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/http11.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/http11.cpython-312.pyc new file mode 100644 index 0000000..a63bee6 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/http11.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/imports.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/imports.cpython-312.pyc new file mode 100644 index 0000000..5e5004c Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/imports.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/protocol.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/protocol.cpython-312.pyc new file mode 100644 index 0000000..b0a8cbc Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/protocol.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/server.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/server.cpython-312.pyc new file mode 100644 index 0000000..648610b Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/server.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/streams.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/streams.cpython-312.pyc new file mode 100644 index 0000000..d5f4da1 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/streams.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/typing.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/typing.cpython-312.pyc new file mode 100644 index 0000000..c8becdf Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/typing.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/uri.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/uri.cpython-312.pyc new file mode 100644 index 0000000..917d886 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/uri.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/utils.cpython-312.pyc 
b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000..737c7f3 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/utils.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/__pycache__/version.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/version.cpython-312.pyc new file mode 100644 index 0000000..abf3d78 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/__pycache__/version.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/__init__.py b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..3b15dc4 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/async_timeout.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/async_timeout.cpython-312.pyc new file mode 100644 index 0000000..608baad Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/async_timeout.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/client.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/client.cpython-312.pyc new file mode 100644 index 0000000..5b42b54 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/client.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/compatibility.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/compatibility.cpython-312.pyc new file mode 100644 index 0000000..f03df79 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/compatibility.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/connection.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/connection.cpython-312.pyc new file mode 100644 index 0000000..b19755c Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/connection.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/messages.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/messages.cpython-312.pyc new file mode 100644 index 0000000..13b0baa Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/messages.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/server.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/server.cpython-312.pyc new file mode 100644 index 0000000..e361198 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/asyncio/__pycache__/server.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/async_timeout.py 
b/hackaton/lib/python3.12/site-packages/websockets/asyncio/async_timeout.py new file mode 100644 index 0000000..6ffa899 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/asyncio/async_timeout.py @@ -0,0 +1,282 @@ +# From https://github.com/aio-libs/async-timeout/blob/master/async_timeout/__init__.py +# Licensed under the Apache License (Apache-2.0) + +import asyncio +import enum +import sys +import warnings +from types import TracebackType +from typing import Optional, Type + + +if sys.version_info >= (3, 11): + from typing import final +else: + # From https://github.com/python/typing_extensions/blob/main/src/typing_extensions.py + # Licensed under the Python Software Foundation License (PSF-2.0) + + # @final exists in 3.8+, but we backport it for all versions + # before 3.11 to keep support for the __final__ attribute. + # See https://bugs.python.org/issue46342 + def final(f): + """This decorator can be used to indicate to type checkers that + the decorated method cannot be overridden, and decorated class + cannot be subclassed. For example: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. The decorator + sets the ``__final__`` attribute to ``True`` on the decorated object + to allow runtime introspection. + """ + try: + f.__final__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass + return f + + # End https://github.com/python/typing_extensions/blob/main/src/typing_extensions.py + + +if sys.version_info >= (3, 11): + + def _uncancel_task(task: "asyncio.Task[object]") -> None: + task.uncancel() + +else: + + def _uncancel_task(task: "asyncio.Task[object]") -> None: + pass + + +__version__ = "4.0.3" + + +__all__ = ("timeout", "timeout_at", "Timeout") + + +def timeout(delay: Optional[float]) -> "Timeout": + """timeout context manager. + + Useful in cases when you want to apply timeout logic around block + of code or in cases when asyncio.wait_for is not suitable. For example: + + >>> async with timeout(0.001): + ... async with aiohttp.get('https://github.com') as r: + ... await r.text() + + + delay - value in seconds or None to disable timeout logic + """ + loop = asyncio.get_running_loop() + if delay is not None: + deadline = loop.time() + delay # type: Optional[float] + else: + deadline = None + return Timeout(deadline, loop) + + +def timeout_at(deadline: Optional[float]) -> "Timeout": + """Schedule the timeout at absolute time. + + deadline argument points on the time in the same clock system + as loop.time(). + + Please note: it is not POSIX time but a time with + undefined starting base, e.g. the time of the system power on. + + >>> async with timeout_at(loop.time() + 10): + ... async with aiohttp.get('https://github.com') as r: + ... await r.text() + + + """ + loop = asyncio.get_running_loop() + return Timeout(deadline, loop) + + +class _State(enum.Enum): + INIT = "INIT" + ENTER = "ENTER" + TIMEOUT = "TIMEOUT" + EXIT = "EXIT" + + +@final +class Timeout: + # Internal class, please don't instantiate it directly + # Use timeout() and timeout_at() public factories instead. + # + # Implementation note: `async with timeout()` is preferred + # over `with timeout()`. 
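Before the ``Timeout`` class itself, the two factories above are the whole public surface of this vendored backport. A minimal, runnable sketch of ``timeout()``, substituting ``asyncio.sleep`` for the aiohttp call shown in the docstring:

    import asyncio

    from websockets.asyncio.async_timeout import timeout  # vendored copy from this diff

    async def main() -> None:
        try:
            async with timeout(0.1):        # deadline = loop.time() + 0.1
                await asyncio.sleep(1)      # cancelled when the deadline passes
        except asyncio.TimeoutError:
            print("timed out")

    asyncio.run(main())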
+ # While technically the Timeout class implementation + # doesn't need to be async at all, + # the `async with` statement explicitly points that + # the context manager should be used from async function context. + # + # This design allows to avoid many silly misusages. + # + # TimeoutError is raised immediately when scheduled + # if the deadline is passed. + # The purpose is to time out as soon as possible + # without waiting for the next await expression. + + __slots__ = ("_deadline", "_loop", "_state", "_timeout_handler", "_task") + + def __init__( + self, deadline: Optional[float], loop: asyncio.AbstractEventLoop + ) -> None: + self._loop = loop + self._state = _State.INIT + + self._task: Optional["asyncio.Task[object]"] = None + self._timeout_handler = None # type: Optional[asyncio.Handle] + if deadline is None: + self._deadline = None # type: Optional[float] + else: + self.update(deadline) + + def __enter__(self) -> "Timeout": + warnings.warn( + "with timeout() is deprecated, use async with timeout() instead", + DeprecationWarning, + stacklevel=2, + ) + self._do_enter() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + self._do_exit(exc_type) + return None + + async def __aenter__(self) -> "Timeout": + self._do_enter() + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + self._do_exit(exc_type) + return None + + @property + def expired(self) -> bool: + """Is timeout expired during execution?""" + return self._state == _State.TIMEOUT + + @property + def deadline(self) -> Optional[float]: + return self._deadline + + def reject(self) -> None: + """Reject scheduled timeout if any.""" + # cancel is maybe better name but + # task.cancel() raises CancelledError in asyncio world. + if self._state not in (_State.INIT, _State.ENTER): + raise RuntimeError(f"invalid state {self._state.value}") + self._reject() + + def _reject(self) -> None: + self._task = None + if self._timeout_handler is not None: + self._timeout_handler.cancel() + self._timeout_handler = None + + def shift(self, delay: float) -> None: + """Advance timeout on delay seconds. + + The delay can be negative. + + Raise RuntimeError if shift is called when deadline is not scheduled + """ + deadline = self._deadline + if deadline is None: + raise RuntimeError("cannot shift timeout if deadline is not scheduled") + self.update(deadline + delay) + + def update(self, deadline: float) -> None: + """Set deadline to absolute value. + + deadline argument points on the time in the same clock system + as loop.time(). + + If new deadline is in the past the timeout is raised immediately. + + Please note: it is not POSIX time but a time with + undefined starting base, e.g. the time of the system power on. 
+ """ + if self._state == _State.EXIT: + raise RuntimeError("cannot reschedule after exit from context manager") + if self._state == _State.TIMEOUT: + raise RuntimeError("cannot reschedule expired timeout") + if self._timeout_handler is not None: + self._timeout_handler.cancel() + self._deadline = deadline + if self._state != _State.INIT: + self._reschedule() + + def _reschedule(self) -> None: + assert self._state == _State.ENTER + deadline = self._deadline + if deadline is None: + return + + now = self._loop.time() + if self._timeout_handler is not None: + self._timeout_handler.cancel() + + self._task = asyncio.current_task() + if deadline <= now: + self._timeout_handler = self._loop.call_soon(self._on_timeout) + else: + self._timeout_handler = self._loop.call_at(deadline, self._on_timeout) + + def _do_enter(self) -> None: + if self._state != _State.INIT: + raise RuntimeError(f"invalid state {self._state.value}") + self._state = _State.ENTER + self._reschedule() + + def _do_exit(self, exc_type: Optional[Type[BaseException]]) -> None: + if exc_type is asyncio.CancelledError and self._state == _State.TIMEOUT: + assert self._task is not None + _uncancel_task(self._task) + self._timeout_handler = None + self._task = None + raise asyncio.TimeoutError + # timeout has not expired + self._state = _State.EXIT + self._reject() + return None + + def _on_timeout(self) -> None: + assert self._task is not None + self._task.cancel() + self._state = _State.TIMEOUT + # drop the reference early + self._timeout_handler = None + + +# End https://github.com/aio-libs/async-timeout/blob/master/async_timeout/__init__.py diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/client.py b/hackaton/lib/python3.12/site-packages/websockets/asyncio/client.py new file mode 100644 index 0000000..b1beb3e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/asyncio/client.py @@ -0,0 +1,561 @@ +from __future__ import annotations + +import asyncio +import logging +import os +import urllib.parse +from types import TracebackType +from typing import Any, AsyncIterator, Callable, Generator, Sequence + +from ..client import ClientProtocol, backoff +from ..datastructures import HeadersLike +from ..exceptions import InvalidStatus, SecurityError +from ..extensions.base import ClientExtensionFactory +from ..extensions.permessage_deflate import enable_client_permessage_deflate +from ..headers import validate_subprotocols +from ..http11 import USER_AGENT, Response +from ..protocol import CONNECTING, Event +from ..typing import LoggerLike, Origin, Subprotocol +from ..uri import WebSocketURI, parse_uri +from .compatibility import TimeoutError, asyncio_timeout +from .connection import Connection + + +__all__ = ["connect", "unix_connect", "ClientConnection"] + +MAX_REDIRECTS = int(os.environ.get("WEBSOCKETS_MAX_REDIRECTS", "10")) + + +class ClientConnection(Connection): + """ + :mod:`asyncio` implementation of a WebSocket client connection. + + :class:`ClientConnection` provides :meth:`recv` and :meth:`send` coroutines + for receiving and sending messages. + + It supports asynchronous iteration to receive messages:: + + async for message in websocket: + await process(message) + + The iterator exits normally when the connection is closed with close code + 1000 (OK) or 1001 (going away) or without a close code. It raises a + :exc:`~websockets.exceptions.ConnectionClosedError` when the connection is + closed with any other code. 
+ + The ``ping_interval``, ``ping_timeout``, ``close_timeout``, ``max_queue``, + and ``write_limit`` arguments have the same meaning as in :func:`connect`. + + Args: + protocol: Sans-I/O connection. + + """ + + def __init__( + self, + protocol: ClientProtocol, + *, + ping_interval: float | None = 20, + ping_timeout: float | None = 20, + close_timeout: float | None = 10, + max_queue: int | tuple[int, int | None] = 16, + write_limit: int | tuple[int, int | None] = 2**15, + ) -> None: + self.protocol: ClientProtocol + super().__init__( + protocol, + ping_interval=ping_interval, + ping_timeout=ping_timeout, + close_timeout=close_timeout, + max_queue=max_queue, + write_limit=write_limit, + ) + self.response_rcvd: asyncio.Future[None] = self.loop.create_future() + + async def handshake( + self, + additional_headers: HeadersLike | None = None, + user_agent_header: str | None = USER_AGENT, + ) -> None: + """ + Perform the opening handshake. + + """ + async with self.send_context(expected_state=CONNECTING): + self.request = self.protocol.connect() + if additional_headers is not None: + self.request.headers.update(additional_headers) + if user_agent_header: + self.request.headers["User-Agent"] = user_agent_header + self.protocol.send_request(self.request) + + await asyncio.wait( + [self.response_rcvd, self.connection_lost_waiter], + return_when=asyncio.FIRST_COMPLETED, + ) + + # self.protocol.handshake_exc is always set when the connection is lost + # before receiving a response, when the response cannot be parsed, or + # when the response fails the handshake. + + if self.protocol.handshake_exc is not None: + raise self.protocol.handshake_exc + + def process_event(self, event: Event) -> None: + """ + Process one incoming event. + + """ + # First event - handshake response. + if self.response is None: + assert isinstance(event, Response) + self.response = event + self.response_rcvd.set_result(None) + # Later events - frames. + else: + super().process_event(event) + + +def process_exception(exc: Exception) -> Exception | None: + """ + Determine whether a connection error is retryable or fatal. + + When reconnecting automatically with ``async for ... in connect(...)``, if a + connection attempt fails, :func:`process_exception` is called to determine + whether to retry connecting or to raise the exception. + + This function defines the default behavior, which is to retry on: + + * :exc:`EOFError`, :exc:`OSError`, :exc:`asyncio.TimeoutError`: network + errors; + * :exc:`~websockets.exceptions.InvalidStatus` when the status code is 500, + 502, 503, or 504: server or proxy errors. + + All other exceptions are considered fatal. + + You can change this behavior with the ``process_exception`` argument of + :func:`connect`. + + Return :obj:`None` if the exception is retryable i.e. when the error could + be transient and trying to reconnect with the same parameters could succeed. + The exception will be logged at the ``INFO`` level. + + Return an exception, either ``exc`` or a new exception, if the exception is + fatal i.e. when trying to reconnect will most likely produce the same error. + That exception will be raised, breaking out of the retry loop.
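Given that default, a caller can layer its own policy on top. A sketch of a variant that also retries on HTTP 429 before deferring to the default (the policy itself is hypothetical; it plugs into the ``process_exception`` argument of ``connect``):

    from websockets.asyncio.client import connect, process_exception
    from websockets.exceptions import InvalidStatus

    def retry_on_429(exc: Exception) -> Exception | None:
        """Treat 429 Too Many Requests as transient, else use the default."""
        if isinstance(exc, InvalidStatus) and exc.response.status_code == 429:
            return None  # retryable: reconnect with backoff
        return process_exception(exc)

    # async for websocket in connect(uri, process_exception=retry_on_429): ...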
+ + """ + if isinstance(exc, (EOFError, OSError, asyncio.TimeoutError)): + return None + if isinstance(exc, InvalidStatus) and exc.response.status_code in [ + 500, # Internal Server Error + 502, # Bad Gateway + 503, # Service Unavailable + 504, # Gateway Timeout + ]: + return None + return exc + + +# This is spelled in lower case because it's exposed as a callable in the API. +class connect: + """ + Connect to the WebSocket server at ``uri``. + + This coroutine returns a :class:`ClientConnection` instance, which you can + use to send and receive messages. + + :func:`connect` may be used as an asynchronous context manager:: + + from websockets.asyncio.client import connect + + async with connect(...) as websocket: + ... + + The connection is closed automatically when exiting the context. + + :func:`connect` can be used as an infinite asynchronous iterator to + reconnect automatically on errors:: + + async for websocket in connect(...): + try: + ... + except websockets.ConnectionClosed: + continue + + If the connection fails with a transient error, it is retried with + exponential backoff. If it fails with a fatal error, the exception is + raised, breaking out of the loop. + + The connection is closed automatically after each iteration of the loop. + + Args: + uri: URI of the WebSocket server. + origin: Value of the ``Origin`` header, for servers that require it. + extensions: List of supported extensions, in order in which they + should be negotiated and run. + subprotocols: List of supported subprotocols, in order of decreasing + preference. + additional_headers (HeadersLike | None): Arbitrary HTTP headers to add + to the handshake request. + user_agent_header: Value of the ``User-Agent`` request header. + It defaults to ``"Python/x.y.z websockets/X.Y"``. + Setting it to :obj:`None` removes the header. + compression: The "permessage-deflate" extension is enabled by default. + Set ``compression`` to :obj:`None` to disable it. See the + :doc:`compression guide <../../topics/compression>` for details. + process_exception: When reconnecting automatically, tell whether an + error is transient or fatal. The default behavior is defined by + :func:`process_exception`. Refer to its documentation for details. + open_timeout: Timeout for opening the connection in seconds. + :obj:`None` disables the timeout. + ping_interval: Interval between keepalive pings in seconds. + :obj:`None` disables keepalive. + ping_timeout: Timeout for keepalive pings in seconds. + :obj:`None` disables timeouts. + close_timeout: Timeout for closing the connection in seconds. + :obj:`None` disables the timeout. + max_size: Maximum size of incoming messages in bytes. + :obj:`None` disables the limit. + max_queue: High-water mark of the buffer where frames are received. + It defaults to 16 frames. The low-water mark defaults to ``max_queue + // 4``. You may pass a ``(high, low)`` tuple to set the high-water + and low-water marks. + write_limit: High-water mark of write buffer in bytes. It is passed to + :meth:`~asyncio.WriteTransport.set_write_buffer_limits`. It defaults + to 32 KiB. You may pass a ``(high, low)`` tuple to set the + high-water and low-water marks. + logger: Logger for this client. + It defaults to ``logging.getLogger("websockets.client")``. + See the :doc:`logging guide <../../topics/logging>` for details. + create_connection: Factory for the :class:`ClientConnection` managing + the connection. Set it to a wrapper or a subclass to customize + connection handling. 
+ + Any other keyword arguments are passed to the event loop's + :meth:`~asyncio.loop.create_connection` method. + + For example: + + * You can set ``ssl`` to a :class:`~ssl.SSLContext` to enforce TLS settings. + When connecting to a ``wss://`` URI, if ``ssl`` isn't provided, a TLS + context is created with :func:`~ssl.create_default_context`. + + * You can set ``server_hostname`` to override the host name from ``uri`` in + the TLS handshake. + + * You can set ``host`` and ``port`` to connect to a different host and port + from those found in ``uri``. This only changes the destination of the TCP + connection. The host name from ``uri`` is still used in the TLS handshake + for secure connections and in the ``Host`` header. + + * You can set ``sock`` to provide a preexisting TCP socket. You may call + :func:`socket.create_connection` (not to be confused with the event loop's + :meth:`~asyncio.loop.create_connection` method) to create a suitable + client socket and customize it. + + Raises: + InvalidURI: If ``uri`` isn't a valid WebSocket URI. + OSError: If the TCP connection fails. + InvalidHandshake: If the opening handshake fails. + TimeoutError: If the opening handshake times out. + + """ + + def __init__( + self, + uri: str, + *, + # WebSocket + origin: Origin | None = None, + extensions: Sequence[ClientExtensionFactory] | None = None, + subprotocols: Sequence[Subprotocol] | None = None, + additional_headers: HeadersLike | None = None, + user_agent_header: str | None = USER_AGENT, + compression: str | None = "deflate", + process_exception: Callable[[Exception], Exception | None] = process_exception, + # Timeouts + open_timeout: float | None = 10, + ping_interval: float | None = 20, + ping_timeout: float | None = 20, + close_timeout: float | None = 10, + # Limits + max_size: int | None = 2**20, + max_queue: int | tuple[int, int | None] = 16, + write_limit: int | tuple[int, int | None] = 2**15, + # Logging + logger: LoggerLike | None = None, + # Escape hatch for advanced customization + create_connection: type[ClientConnection] | None = None, + # Other keyword arguments are passed to loop.create_connection + **kwargs: Any, + ) -> None: + self.uri = uri + + if subprotocols is not None: + validate_subprotocols(subprotocols) + + if compression == "deflate": + extensions = enable_client_permessage_deflate(extensions) + elif compression is not None: + raise ValueError(f"unsupported compression: {compression}") + + if logger is None: + logger = logging.getLogger("websockets.client") + + if create_connection is None: + create_connection = ClientConnection + + def protocol_factory(wsuri: WebSocketURI) -> ClientConnection: + # This is a protocol in the Sans-I/O implementation of websockets. + protocol = ClientProtocol( + wsuri, + origin=origin, + extensions=extensions, + subprotocols=subprotocols, + max_size=max_size, + logger=logger, + ) + # This is a connection in websockets and a protocol in asyncio. 
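As a concrete case of the TLS notes above, a custom CA bundle can be supplied through the ``ssl`` keyword, which flows straight to ``loop.create_connection``; the path and URI are placeholders:

    import asyncio
    import ssl

    from websockets.asyncio.client import connect

    ssl_context = ssl.create_default_context(cafile="ca.pem")  # placeholder bundle

    async def open_secure() -> None:
        async with connect("wss://example.com/ws", ssl=ssl_context) as websocket:
            await websocket.send("hello")

    # asyncio.run(open_secure())  # needs a reachable wss:// endpoint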
+ connection = create_connection( + protocol, + ping_interval=ping_interval, + ping_timeout=ping_timeout, + close_timeout=close_timeout, + max_queue=max_queue, + write_limit=write_limit, + ) + return connection + + self.protocol_factory = protocol_factory + self.handshake_args = ( + additional_headers, + user_agent_header, + ) + self.process_exception = process_exception + self.open_timeout = open_timeout + self.logger = logger + self.connection_kwargs = kwargs + + async def create_connection(self) -> ClientConnection: + """Create TCP or Unix connection.""" + loop = asyncio.get_running_loop() + + wsuri = parse_uri(self.uri) + kwargs = self.connection_kwargs.copy() + + def factory() -> ClientConnection: + return self.protocol_factory(wsuri) + + if wsuri.secure: + kwargs.setdefault("ssl", True) + kwargs.setdefault("server_hostname", wsuri.host) + if kwargs.get("ssl") is None: + raise TypeError("ssl=None is incompatible with a wss:// URI") + else: + if kwargs.get("ssl") is not None: + raise TypeError("ssl argument is incompatible with a ws:// URI") + + if kwargs.pop("unix", False): + _, connection = await loop.create_unix_connection(factory, **kwargs) + else: + if kwargs.get("sock") is None: + kwargs.setdefault("host", wsuri.host) + kwargs.setdefault("port", wsuri.port) + _, connection = await loop.create_connection(factory, **kwargs) + return connection + + def process_redirect(self, exc: Exception) -> Exception | str: + """ + Determine whether a connection error is a redirect that can be followed. + + Return the new URI if it's a valid redirect. Else, return an exception. + + """ + if not ( + isinstance(exc, InvalidStatus) + and exc.response.status_code + in [ + 300, # Multiple Choices + 301, # Moved Permanently + 302, # Found + 303, # See Other + 307, # Temporary Redirect + 308, # Permanent Redirect + ] + and "Location" in exc.response.headers + ): + return exc + + old_wsuri = parse_uri(self.uri) + new_uri = urllib.parse.urljoin(self.uri, exc.response.headers["Location"]) + new_wsuri = parse_uri(new_uri) + + # If connect() received a socket, it is closed and cannot be reused. + if self.connection_kwargs.get("sock") is not None: + return ValueError( + f"cannot follow redirect to {new_uri} with a preexisting socket" + ) + + # TLS downgrade is forbidden. + if old_wsuri.secure and not new_wsuri.secure: + return SecurityError(f"cannot follow redirect to non-secure URI {new_uri}") + + # Apply restrictions to cross-origin redirects. + if ( + old_wsuri.secure != new_wsuri.secure + or old_wsuri.host != new_wsuri.host + or old_wsuri.port != new_wsuri.port + ): + # Cross-origin redirects on Unix sockets don't quite make sense. + if self.connection_kwargs.get("unix", False): + return ValueError( + f"cannot follow cross-origin redirect to {new_uri} " + f"with a Unix socket" + ) + + # Cross-origin redirects when host and port are overridden are ill-defined. + if ( + self.connection_kwargs.get("host") is not None + or self.connection_kwargs.get("port") is not None + ): + return ValueError( + f"cannot follow cross-origin redirect to {new_uri} " + f"with an explicit host or port" + ) + + return new_uri + + # ... = await connect(...) + + def __await__(self) -> Generator[Any, None, ClientConnection]: + # Create a suitable iterator by calling __await__ on a coroutine. 
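Besides ``async with`` and ``async for``, ``connect`` supports a bare ``await`` via the ``__await__`` defined next; the caller then owns the close, as in this sketch (placeholder URI):

    import asyncio

    from websockets.asyncio.client import connect

    async def main() -> None:
        websocket = await connect("ws://localhost:8765")  # placeholder URI
        try:
            await websocket.send("ping?")
            print(await websocket.recv())
        finally:
            await websocket.close()  # no context manager, so closing is our job

    asyncio.run(main())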
+ return self.__await_impl__().__await__() + + async def __await_impl__(self) -> ClientConnection: + try: + async with asyncio_timeout(self.open_timeout): + for _ in range(MAX_REDIRECTS): + self.connection = await self.create_connection() + try: + await self.connection.handshake(*self.handshake_args) + except asyncio.CancelledError: + self.connection.close_transport() + raise + except Exception as exc: + # Always close the connection even though keep-alive is + # the default in HTTP/1.1 because create_connection ties + # opening the network connection with initializing the + # protocol. In the current design of connect(), there is + # no easy way to reuse the network connection that works + # in every case nor to reinitialize the protocol. + self.connection.close_transport() + + uri_or_exc = self.process_redirect(exc) + # Response is a valid redirect; follow it. + if isinstance(uri_or_exc, str): + self.uri = uri_or_exc + continue + # Response isn't a valid redirect; raise the exception. + if uri_or_exc is exc: + raise + else: + raise uri_or_exc from exc + + else: + self.connection.start_keepalive() + return self.connection + else: + raise SecurityError(f"more than {MAX_REDIRECTS} redirects") + + except TimeoutError: + # Re-raise exception with an informative error message. + raise TimeoutError("timed out during handshake") from None + + # ... = yield from connect(...) - remove when dropping Python < 3.10 + + __iter__ = __await__ + + # async with connect(...) as ...: ... + + async def __aenter__(self) -> ClientConnection: + return await self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + await self.connection.close() + + # async for ... in connect(...): + + async def __aiter__(self) -> AsyncIterator[ClientConnection]: + delays: Generator[float, None, None] | None = None + while True: + try: + async with self as protocol: + yield protocol + except Exception as exc: + # Determine whether the exception is retryable or fatal. + # The API of process_exception is "return an exception or None"; + # "raise an exception" is also supported because it's a frequent + # mistake. It isn't documented in order to keep the API simple. + try: + new_exc = self.process_exception(exc) + except Exception as raised_exc: + new_exc = raised_exc + + # The connection failed with a fatal error. + # Raise the exception and exit the loop. + if new_exc is exc: + raise + if new_exc is not None: + raise new_exc from exc + + # The connection failed with a retryable error. + # Start or continue backoff and reconnect. + if delays is None: + delays = backoff() + delay = next(delays) + self.logger.info( + "! connect failed; reconnecting in %.1f seconds", + delay, + exc_info=True, + ) + await asyncio.sleep(delay) + continue + + else: + # The connection succeeded. Reset backoff. + delays = None + + +def unix_connect( + path: str | None = None, + uri: str | None = None, + **kwargs: Any, +) -> connect: + """ + Connect to a WebSocket server listening on a Unix socket. + + This function accepts the same keyword arguments as :func:`connect`. + + It's only available on Unix. + + It's mainly useful for debugging servers listening on Unix sockets. + + Args: + path: File system path to the Unix socket. + uri: URI of the WebSocket server. ``uri`` defaults to + ``ws://localhost/`` or, when a ``ssl`` argument is provided, to + ``wss://localhost/``. 
+ + """ + if uri is None: + if kwargs.get("ssl") is None: + uri = "ws://localhost/" + else: + uri = "wss://localhost/" + return connect(uri=uri, unix=True, path=path, **kwargs) diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/compatibility.py b/hackaton/lib/python3.12/site-packages/websockets/asyncio/compatibility.py new file mode 100644 index 0000000..e170000 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/asyncio/compatibility.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +import sys + + +__all__ = ["TimeoutError", "aiter", "anext", "asyncio_timeout", "asyncio_timeout_at"] + + +if sys.version_info[:2] >= (3, 11): + TimeoutError = TimeoutError + aiter = aiter + anext = anext + from asyncio import ( + timeout as asyncio_timeout, # noqa: F401 + timeout_at as asyncio_timeout_at, # noqa: F401 + ) + +else: # Python < 3.11 + from asyncio import TimeoutError + + def aiter(async_iterable): + return type(async_iterable).__aiter__(async_iterable) + + async def anext(async_iterator): + return await type(async_iterator).__anext__(async_iterator) + + from .async_timeout import ( + timeout as asyncio_timeout, # noqa: F401 + timeout_at as asyncio_timeout_at, # noqa: F401 + ) diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/connection.py b/hackaton/lib/python3.12/site-packages/websockets/asyncio/connection.py new file mode 100644 index 0000000..6af61a4 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/asyncio/connection.py @@ -0,0 +1,1148 @@ +from __future__ import annotations + +import asyncio +import collections +import contextlib +import logging +import random +import struct +import sys +import uuid +from types import TracebackType +from typing import ( + Any, + AsyncIterable, + AsyncIterator, + Awaitable, + Iterable, + Mapping, + cast, +) + +from ..exceptions import ( + ConcurrencyError, + ConnectionClosed, + ConnectionClosedOK, + ProtocolError, +) +from ..frames import DATA_OPCODES, BytesLike, CloseCode, Frame, Opcode +from ..http11 import Request, Response +from ..protocol import CLOSED, OPEN, Event, Protocol, State +from ..typing import Data, LoggerLike, Subprotocol +from .compatibility import ( + TimeoutError, + aiter, + anext, + asyncio_timeout, + asyncio_timeout_at, +) +from .messages import Assembler + + +__all__ = ["Connection"] + + +class Connection(asyncio.Protocol): + """ + :mod:`asyncio` implementation of a WebSocket connection. + + :class:`Connection` provides APIs shared between WebSocket servers and + clients. + + You shouldn't use it directly. Instead, use + :class:`~websockets.asyncio.client.ClientConnection` or + :class:`~websockets.asyncio.server.ServerConnection`. + + """ + + def __init__( + self, + protocol: Protocol, + *, + ping_interval: float | None = 20, + ping_timeout: float | None = 20, + close_timeout: float | None = 10, + max_queue: int | tuple[int, int | None] = 16, + write_limit: int | tuple[int, int | None] = 2**15, + ) -> None: + self.protocol = protocol + self.ping_interval = ping_interval + self.ping_timeout = ping_timeout + self.close_timeout = close_timeout + if isinstance(max_queue, int): + max_queue = (max_queue, None) + self.max_queue = max_queue + if isinstance(write_limit, int): + write_limit = (write_limit, None) + self.write_limit = write_limit + + # Inject reference to this instance in the protocol's logger. + self.protocol.logger = logging.LoggerAdapter( + self.protocol.logger, + {"websocket": self}, + ) + + # Copy attributes from the protocol for convenience. 
+ self.id: uuid.UUID = self.protocol.id + """Unique identifier of the connection. Useful in logs.""" + self.logger: LoggerLike = self.protocol.logger + """Logger for this connection.""" + self.debug = self.protocol.debug + + # HTTP handshake request and response. + self.request: Request | None = None + """Opening handshake request.""" + self.response: Response | None = None + """Opening handshake response.""" + + # Event loop running this connection. + self.loop = asyncio.get_running_loop() + + # Assembler turning frames into messages and serializing reads. + self.recv_messages: Assembler  # initialized in connection_made + + # Deadline for the closing handshake. + self.close_deadline: float | None = None + + # Protect sending fragmented messages. + self.fragmented_send_waiter: asyncio.Future[None] | None = None + + # Mapping of ping IDs to pong waiters, in chronological order. + self.pong_waiters: dict[bytes, tuple[asyncio.Future[float], float]] = {} + + self.latency: float = 0 + """ + Latency of the connection, in seconds. + + Latency is defined as the round-trip time of the connection. It is + measured by sending a Ping frame and waiting for a matching Pong frame. + Before the first measurement, :attr:`latency` is ``0``. + + By default, websockets enables a :ref:`keepalive <keepalive>` mechanism + that sends Ping frames automatically at regular intervals. You can also + send Ping frames and measure latency with :meth:`ping`. + """ + + # Task that sends keepalive pings. None when ping_interval is None. + self.keepalive_task: asyncio.Task[None] | None = None + + # Exception raised while reading from the connection, to be chained to + # ConnectionClosed in order to show why the TCP connection dropped. + self.recv_exc: BaseException | None = None + + # Completed when the TCP connection is closed and the WebSocket + # connection state becomes CLOSED. + self.connection_lost_waiter: asyncio.Future[None] = self.loop.create_future() + + # Adapted from asyncio.FlowControlMixin + self.paused: bool = False + self.drain_waiters: collections.deque[asyncio.Future[None]] = ( + collections.deque() + ) + + # Public attributes + + @property + def local_address(self) -> Any: + """ + Local address of the connection. + + For IPv4 connections, this is a ``(host, port)`` tuple. + + The format of the address depends on the address family. + See :meth:`~socket.socket.getsockname`. + + """ + return self.transport.get_extra_info("sockname") + + @property + def remote_address(self) -> Any: + """ + Remote address of the connection. + + For IPv4 connections, this is a ``(host, port)`` tuple. + + The format of the address depends on the address family. + See :meth:`~socket.socket.getpeername`. + + """ + return self.transport.get_extra_info("peername") + + @property + def state(self) -> State: + """ + State of the WebSocket connection, defined in :rfc:`6455`. + + This attribute is provided for completeness. Typical applications + shouldn't check its value. Instead, they should call :meth:`~recv` or + :meth:`send` and handle :exc:`~websockets.exceptions.ConnectionClosed` + exceptions. + + """ + return self.protocol.state + + @property + def subprotocol(self) -> Subprotocol | None: + """ + Subprotocol negotiated during the opening handshake. + + :obj:`None` if no subprotocol was negotiated.
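The ``latency`` attribute above is refreshed by keepalive round trips, and can also be sampled on demand by awaiting the future that ``ping()`` returns (per its docstring later in this file); a sketch assuming an established connection:

    async def measure_latency(websocket) -> float:
        """One explicit Ping/Pong round trip; also updates websocket.latency."""
        pong_waiter = await websocket.ping()
        return await pong_waiter  # round-trip time in seconds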
+ + """ + return self.protocol.subprotocol + + # Public methods + + async def __aenter__(self) -> Connection: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + if exc_type is None: + await self.close() + else: + await self.close(CloseCode.INTERNAL_ERROR) + + async def __aiter__(self) -> AsyncIterator[Data]: + """ + Iterate on incoming messages. + + The iterator calls :meth:`recv` and yields messages asynchronously in an + infinite loop. + + It exits when the connection is closed normally. It raises a + :exc:`~websockets.exceptions.ConnectionClosedError` exception after a + protocol error or a network failure. + + """ + try: + while True: + yield await self.recv() + except ConnectionClosedOK: + return + + async def recv(self, decode: bool | None = None) -> Data: + """ + Receive the next message. + + When the connection is closed, :meth:`recv` raises + :exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it raises + :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal closure + and :exc:`~websockets.exceptions.ConnectionClosedError` after a protocol + error or a network failure. This is how you detect the end of the + message stream. + + Canceling :meth:`recv` is safe. There's no risk of losing data. The next + invocation of :meth:`recv` will return the next message. + + This makes it possible to enforce a timeout by wrapping :meth:`recv` in + :func:`~asyncio.timeout` or :func:`~asyncio.wait_for`. + + When the message is fragmented, :meth:`recv` waits until all fragments + are received, reassembles them, and returns the whole message. + + Args: + decode: Set this flag to override the default behavior of returning + :class:`str` or :class:`bytes`. See below for details. + + Returns: + A string (:class:`str`) for a Text_ frame or a bytestring + (:class:`bytes`) for a Binary_ frame. + + .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + You may override this behavior with the ``decode`` argument: + + * Set ``decode=False`` to disable UTF-8 decoding of Text_ frames + and return a bytestring (:class:`bytes`). This may be useful to + optimize performance when decoding isn't needed. + * Set ``decode=True`` to force UTF-8 decoding of Binary_ frames + and return a string (:class:`str`). This is useful for servers + that send binary frames instead of text frames. + + Raises: + ConnectionClosed: When the connection is closed. + ConcurrencyError: If two coroutines call :meth:`recv` or + :meth:`recv_streaming` concurrently. + + """ + try: + return await self.recv_messages.get(decode) + except EOFError: + # Wait for the protocol state to be CLOSED before accessing close_exc. + await asyncio.shield(self.connection_lost_waiter) + raise self.protocol.close_exc from self.recv_exc + except ConcurrencyError: + raise ConcurrencyError( + "cannot call recv while another coroutine " + "is already running recv or recv_streaming" + ) from None + + async def recv_streaming(self, decode: bool | None = None) -> AsyncIterator[Data]: + """ + Receive the next message frame by frame. + + This method is designed for receiving fragmented messages. It returns an + asynchronous iterator that yields each fragment as it is received. This + iterator must be fully consumed. 
Else, future calls to :meth:`recv` or + :meth:`recv_streaming` will raise + :exc:`~websockets.exceptions.ConcurrencyError`, making the connection + unusable. + + :meth:`recv_streaming` raises the same exceptions as :meth:`recv`. + + Canceling :meth:`recv_streaming` before receiving the first frame is + safe. Canceling it after receiving one or more frames leaves the + iterator in a partially consumed state, making the connection unusable. + Instead, you should close the connection with :meth:`close`. + + Args: + decode: Set this flag to override the default behavior of returning + :class:`str` or :class:`bytes`. See below for details. + + Returns: + An iterator of strings (:class:`str`) for a Text_ frame or + bytestrings (:class:`bytes`) for a Binary_ frame. + + .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + You may override this behavior with the ``decode`` argument: + + * Set ``decode=False`` to disable UTF-8 decoding of Text_ frames + and return bytestrings (:class:`bytes`). This may be useful to + optimize performance when decoding isn't needed. + * Set ``decode=True`` to force UTF-8 decoding of Binary_ frames + and return strings (:class:`str`). This is useful for servers + that send binary frames instead of text frames. + + Raises: + ConnectionClosed: When the connection is closed. + ConcurrencyError: If two coroutines call :meth:`recv` or + :meth:`recv_streaming` concurrently. + + """ + try: + async for frame in self.recv_messages.get_iter(decode): + yield frame + except EOFError: + # Wait for the protocol state to be CLOSED before accessing close_exc. + await asyncio.shield(self.connection_lost_waiter) + raise self.protocol.close_exc from self.recv_exc + except ConcurrencyError: + raise ConcurrencyError( + "cannot call recv_streaming while another coroutine " + "is already running recv or recv_streaming" + ) from None + + async def send(self, message: Data | Iterable[Data] | AsyncIterable[Data]) -> None: + """ + Send a message. + + A string (:class:`str`) is sent as a Text_ frame. A bytestring or + bytes-like object (:class:`bytes`, :class:`bytearray`, or + :class:`memoryview`) is sent as a Binary_ frame. + + .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + :meth:`send` also accepts an iterable or an asynchronous iterable of + strings, bytestrings, or bytes-like objects to enable fragmentation_. + Each item is treated as a message fragment and sent in its own frame. + All items must be of the same type, or else :meth:`send` will raise a + :exc:`TypeError` and the connection will be closed. + + .. _fragmentation: https://datatracker.ietf.org/doc/html/rfc6455#section-5.4 + + :meth:`send` rejects dict-like objects because this is often an error. + (If you really want to send the keys of a dict-like object as fragments, + call its :meth:`~dict.keys` method and pass the result to :meth:`send`.) + + Canceling :meth:`send` is discouraged. Instead, you should close the + connection with :meth:`close`. Indeed, there are only two situations + where :meth:`send` may yield control to the event loop and then get + canceled; in both cases, :meth:`close` has the same effect and is + more clear: + + 1. The write buffer is full. If you don't want to wait until enough + data is sent, your only alternative is to close the connection. + :meth:`close` will likely time out then abort the TCP connection. + 2. 
``message`` is an asynchronous iterator that yields control. + Stopping in the middle of a fragmented message will cause a + protocol error and the connection will be closed. + + When the connection is closed, :meth:`send` raises + :exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it + raises :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal + connection closure and + :exc:`~websockets.exceptions.ConnectionClosedError` after a protocol + error or a network failure. + + Args: + message: Message to send. + + Raises: + ConnectionClosed: When the connection is closed. + TypeError: If ``message`` doesn't have a supported type. + + """ + # While sending a fragmented message, prevent sending other messages + # until all fragments are sent. + while self.fragmented_send_waiter is not None: + await asyncio.shield(self.fragmented_send_waiter) + + # Unfragmented message -- this case must be handled first because + # strings and bytes-like objects are iterable. + + if isinstance(message, str): + async with self.send_context(): + self.protocol.send_text(message.encode()) + + elif isinstance(message, BytesLike): + async with self.send_context(): + self.protocol.send_binary(message) + + # Catch a common mistake -- passing a dict to send(). + + elif isinstance(message, Mapping): + raise TypeError("data is a dict-like object") + + # Fragmented message -- regular iterator. + + elif isinstance(message, Iterable): + chunks = iter(message) + try: + chunk = next(chunks) + except StopIteration: + return + + assert self.fragmented_send_waiter is None + self.fragmented_send_waiter = self.loop.create_future() + try: + # First fragment. + if isinstance(chunk, str): + text = True + async with self.send_context(): + self.protocol.send_text( + chunk.encode(), + fin=False, + ) + elif isinstance(chunk, BytesLike): + text = False + async with self.send_context(): + self.protocol.send_binary( + chunk, + fin=False, + ) + else: + raise TypeError("iterable must contain bytes or str") + + # Other fragments + for chunk in chunks: + if isinstance(chunk, str) and text: + async with self.send_context(): + self.protocol.send_continuation( + chunk.encode(), + fin=False, + ) + elif isinstance(chunk, BytesLike) and not text: + async with self.send_context(): + self.protocol.send_continuation( + chunk, + fin=False, + ) + else: + raise TypeError("iterable must contain uniform types") + + # Final fragment. + async with self.send_context(): + self.protocol.send_continuation(b"", fin=True) + + except Exception: + # We're half-way through a fragmented message and we can't + # complete it. This makes the connection unusable. + async with self.send_context(): + self.protocol.fail(1011, "error in fragmented message") + raise + + finally: + self.fragmented_send_waiter.set_result(None) + self.fragmented_send_waiter = None + + # Fragmented message -- async iterator. + + elif isinstance(message, AsyncIterable): + achunks = aiter(message) + try: + chunk = await anext(achunks) + except StopAsyncIteration: + return + + assert self.fragmented_send_waiter is None + self.fragmented_send_waiter = self.loop.create_future() + try: + # First fragment. 
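+                # (Same framing as the regular-iterator branch above: the
+                # first fragment goes out as a Text or Binary frame with
+                # fin=False, each following fragment as a continuation frame
+                # with fin=False, and an empty continuation frame with
+                # fin=True terminates the message.)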
+                if isinstance(chunk, str):
+                    text = True
+                    async with self.send_context():
+                        self.protocol.send_text(
+                            chunk.encode(),
+                            fin=False,
+                        )
+                elif isinstance(chunk, BytesLike):
+                    text = False
+                    async with self.send_context():
+                        self.protocol.send_binary(
+                            chunk,
+                            fin=False,
+                        )
+                else:
+                    raise TypeError("async iterable must contain bytes or str")
+
+                # Other fragments
+                async for chunk in achunks:
+                    if isinstance(chunk, str) and text:
+                        async with self.send_context():
+                            self.protocol.send_continuation(
+                                chunk.encode(),
+                                fin=False,
+                            )
+                    elif isinstance(chunk, BytesLike) and not text:
+                        async with self.send_context():
+                            self.protocol.send_continuation(
+                                chunk,
+                                fin=False,
+                            )
+                    else:
+                        raise TypeError("async iterable must contain uniform types")
+
+                # Final fragment.
+                async with self.send_context():
+                    self.protocol.send_continuation(b"", fin=True)
+
+            except Exception:
+                # We're half-way through a fragmented message and we can't
+                # complete it. This makes the connection unusable.
+                async with self.send_context():
+                    self.protocol.fail(1011, "error in fragmented message")
+                raise
+
+            finally:
+                self.fragmented_send_waiter.set_result(None)
+                self.fragmented_send_waiter = None
+
+        else:
+            raise TypeError("data must be str, bytes, iterable, or async iterable")
+
+    async def close(self, code: int = 1000, reason: str = "") -> None:
+        """
+        Perform the closing handshake.
+
+        :meth:`close` waits for the other end to complete the handshake and
+        for the TCP connection to terminate.
+
+        :meth:`close` is idempotent: it doesn't do anything once the
+        connection is closed.
+
+        Args:
+            code: WebSocket close code.
+            reason: WebSocket close reason.
+
+        """
+        try:
+            # The context manager takes care of waiting for the TCP connection
+            # to terminate after calling a method that sends a close frame.
+            async with self.send_context():
+                if self.fragmented_send_waiter is not None:
+                    self.protocol.fail(1011, "close during fragmented message")
+                else:
+                    self.protocol.send_close(code, reason)
+        except ConnectionClosed:
+            # Ignore ConnectionClosed exceptions raised from send_context().
+            # They mean that the connection is closed, which was the goal.
+            pass
+
+    async def wait_closed(self) -> None:
+        """
+        Wait until the connection is closed.
+
+        :meth:`wait_closed` waits for the closing handshake to complete and for
+        the TCP connection to terminate.
+
+        """
+        await asyncio.shield(self.connection_lost_waiter)
+
+    async def ping(self, data: Data | None = None) -> Awaitable[float]:
+        """
+        Send a Ping_.
+
+        .. _Ping: https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.2
+
+        A ping may serve as a keepalive or as a check that the remote endpoint
+        received all messages up to this point.
+
+        Args:
+            data: Payload of the ping. A :class:`str` will be encoded to UTF-8.
+                If ``data`` is :obj:`None`, the payload is four random bytes.
+
+        Returns:
+            A future that will be completed when the corresponding pong is
+            received. You can ignore it if you don't intend to wait. The result
+            of the future is the latency of the connection in seconds.
+
+            ::
+
+                pong_waiter = await ws.ping()
+                # only if you want to wait for the corresponding pong
+                latency = await pong_waiter
+
+        Raises:
+            ConnectionClosed: When the connection is closed.
+            ConcurrencyError: If another ping was sent with the same data and
+                the corresponding pong wasn't received yet.
+ + """ + if isinstance(data, BytesLike): + data = bytes(data) + elif isinstance(data, str): + data = data.encode() + elif data is not None: + raise TypeError("data must be str or bytes-like") + + async with self.send_context(): + # Protect against duplicates if a payload is explicitly set. + if data in self.pong_waiters: + raise ConcurrencyError("already waiting for a pong with the same data") + + # Generate a unique random payload otherwise. + while data is None or data in self.pong_waiters: + data = struct.pack("!I", random.getrandbits(32)) + + pong_waiter = self.loop.create_future() + # The event loop's default clock is time.monotonic(). Its resolution + # is a bit low on Windows (~16ms). This is improved in Python 3.13. + ping_timestamp = self.loop.time() + self.pong_waiters[data] = (pong_waiter, ping_timestamp) + self.protocol.send_ping(data) + return pong_waiter + + async def pong(self, data: Data = b"") -> None: + """ + Send a Pong_. + + .. _Pong: https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.3 + + An unsolicited pong may serve as a unidirectional heartbeat. + + Args: + data: Payload of the pong. A :class:`str` will be encoded to UTF-8. + + Raises: + ConnectionClosed: When the connection is closed. + + """ + if isinstance(data, BytesLike): + data = bytes(data) + elif isinstance(data, str): + data = data.encode() + else: + raise TypeError("data must be str or bytes-like") + + async with self.send_context(): + self.protocol.send_pong(data) + + # Private methods + + def process_event(self, event: Event) -> None: + """ + Process one incoming event. + + This method is overridden in subclasses to handle the handshake. + + """ + assert isinstance(event, Frame) + if event.opcode in DATA_OPCODES: + self.recv_messages.put(event) + + if event.opcode is Opcode.PONG: + self.acknowledge_pings(bytes(event.data)) + + def acknowledge_pings(self, data: bytes) -> None: + """ + Acknowledge pings when receiving a pong. + + """ + # Ignore unsolicited pong. + if data not in self.pong_waiters: + return + + pong_timestamp = self.loop.time() + + # Sending a pong for only the most recent ping is legal. + # Acknowledge all previous pings too in that case. + ping_id = None + ping_ids = [] + for ping_id, (pong_waiter, ping_timestamp) in self.pong_waiters.items(): + ping_ids.append(ping_id) + latency = pong_timestamp - ping_timestamp + pong_waiter.set_result(latency) + if ping_id == data: + self.latency = latency + break + else: + raise AssertionError("solicited pong not found in pings") + + # Remove acknowledged pings from self.pong_waiters. + for ping_id in ping_ids: + del self.pong_waiters[ping_id] + + def abort_pings(self) -> None: + """ + Raise ConnectionClosed in pending pings. + + They'll never receive a pong once the connection is closed. + + """ + assert self.protocol.state is CLOSED + exc = self.protocol.close_exc + + for pong_waiter, _ping_timestamp in self.pong_waiters.values(): + if not pong_waiter.done(): + pong_waiter.set_exception(exc) + # If the exception is never retrieved, it will be logged when ping + # is garbage-collected. This is confusing for users. + # Given that ping is done (with an exception), canceling it does + # nothing, but it prevents logging the exception. + pong_waiter.cancel() + + self.pong_waiters.clear() + + async def keepalive(self) -> None: + """ + Send a Ping frame and wait for a Pong frame at regular intervals. 
+ + """ + assert self.ping_interval is not None + latency = 0.0 + try: + while True: + # If self.ping_timeout > latency > self.ping_interval, pings + # will be sent immediately after receiving pongs. The period + # will be longer than self.ping_interval. + await asyncio.sleep(self.ping_interval - latency) + + self.logger.debug("% sending keepalive ping") + pong_waiter = await self.ping() + + if self.ping_timeout is not None: + try: + async with asyncio_timeout(self.ping_timeout): + # connection_lost cancels keepalive immediately + # after setting a ConnectionClosed exception on + # pong_waiter. A CancelledError is raised here, + # not a ConnectionClosed exception. + latency = await pong_waiter + self.logger.debug("% received keepalive pong") + except asyncio.TimeoutError: + if self.debug: + self.logger.debug("! timed out waiting for keepalive pong") + async with self.send_context(): + self.protocol.fail( + CloseCode.INTERNAL_ERROR, + "keepalive ping timeout", + ) + raise AssertionError( + "send_context() should wait for connection_lost(), " + "which cancels keepalive()" + ) + except Exception: + self.logger.error("keepalive ping failed", exc_info=True) + + def start_keepalive(self) -> None: + """ + Run :meth:`keepalive` in a task, unless keepalive is disabled. + + """ + if self.ping_interval is not None: + self.keepalive_task = self.loop.create_task(self.keepalive()) + + @contextlib.asynccontextmanager + async def send_context( + self, + *, + expected_state: State = OPEN, # CONNECTING during the opening handshake + ) -> AsyncIterator[None]: + """ + Create a context for writing to the connection from user code. + + On entry, :meth:`send_context` checks that the connection is open; on + exit, it writes outgoing data to the socket:: + + async with self.send_context(): + self.protocol.send_text(message.encode()) + + When the connection isn't open on entry, when the connection is expected + to close on exit, or when an unexpected error happens, terminating the + connection, :meth:`send_context` waits until the connection is closed + then raises :exc:`~websockets.exceptions.ConnectionClosed`. + + """ + # Should we wait until the connection is closed? + wait_for_close = False + # Should we close the transport and raise ConnectionClosed? + raise_close_exc = False + # What exception should we chain ConnectionClosed to? + original_exc: BaseException | None = None + + if self.protocol.state is expected_state: + # Let the caller interact with the protocol. + try: + yield + except (ProtocolError, ConcurrencyError): + # The protocol state wasn't changed. Exit immediately. + raise + except Exception as exc: + self.logger.error("unexpected internal error", exc_info=True) + # This branch should never run. It's a safety net in case of + # bugs. Since we don't know what happened, we will close the + # connection and raise the exception to the caller. + wait_for_close = False + raise_close_exc = True + original_exc = exc + else: + # Check if the connection is expected to close soon. + if self.protocol.close_expected(): + wait_for_close = True + # If the connection is expected to close soon, set the + # close deadline based on the close timeout. + # Since we tested earlier that protocol.state was OPEN + # (or CONNECTING), self.close_deadline is still None. + if self.close_timeout is not None: + assert self.close_deadline is None + self.close_deadline = self.loop.time() + self.close_timeout + # Write outgoing data to the socket and enforce flow control. 
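+                # (drain() enforces the write-buffer limits set with
+                # set_write_buffer_limits() in connection_made(): it blocks
+                # while the transport has paused writing, so backpressure
+                # from the network propagates to callers of send().)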
+                try:
+                    self.send_data()
+                    await self.drain()
+                except Exception as exc:
+                    if self.debug:
+                        self.logger.debug("error while sending data", exc_info=True)
+                    # While the only expected exception here is OSError,
+                    # other exceptions would be treated identically.
+                    wait_for_close = False
+                    raise_close_exc = True
+                    original_exc = exc
+
+        else:  # self.protocol.state is not expected_state
+            # Minor layering violation: we assume that the connection
+            # will be closing soon if it isn't in the expected state.
+            wait_for_close = True
+            # Calculate close_deadline if it wasn't set yet.
+            if self.close_timeout is not None:
+                if self.close_deadline is None:
+                    self.close_deadline = self.loop.time() + self.close_timeout
+            raise_close_exc = True
+
+        # If the connection is expected to close soon and the close timeout
+        # elapses, close the socket to terminate the connection.
+        if wait_for_close:
+            try:
+                async with asyncio_timeout_at(self.close_deadline):
+                    await asyncio.shield(self.connection_lost_waiter)
+            except TimeoutError:
+                # There's no risk to overwrite another error because
+                # original_exc is never set when wait_for_close is True.
+                assert original_exc is None
+                original_exc = TimeoutError("timed out while closing connection")
+                # Set recv_exc before closing the transport in order to get
+                # proper exception reporting.
+                raise_close_exc = True
+                self.set_recv_exc(original_exc)
+
+        # If an error occurred, close the transport to terminate the connection and
+        # raise an exception.
+        if raise_close_exc:
+            self.close_transport()
+            # Wait for the protocol state to be CLOSED before accessing close_exc.
+            await asyncio.shield(self.connection_lost_waiter)
+            raise self.protocol.close_exc from original_exc
+
+    def send_data(self) -> None:
+        """
+        Send outgoing data.
+
+        Raises:
+            OSError: When a socket operation fails.
+
+        """
+        for data in self.protocol.data_to_send():
+            if data:
+                self.transport.write(data)
+            else:
+                # Half-close the TCP connection when possible, i.e. no TLS.
+                if self.transport.can_write_eof():
+                    if self.debug:
+                        self.logger.debug("x half-closing TCP connection")
+                    # write_eof() doesn't document which exceptions it raises.
+                    # OSError is plausible. uvloop can raise RuntimeError here.
+                    try:
+                        self.transport.write_eof()
+                    except (OSError, RuntimeError):  # pragma: no cover
+                        pass
+                # Else, close the TCP connection.
+                else:  # pragma: no cover
+                    if self.debug:
+                        self.logger.debug("x closing TCP connection")
+                    self.transport.close()
+
+    def set_recv_exc(self, exc: BaseException | None) -> None:
+        """
+        Set recv_exc, if not set yet.
+
+        """
+        if self.recv_exc is None:
+            self.recv_exc = exc
+
+    def close_transport(self) -> None:
+        """
+        Close transport and message assembler.
+
+        """
+        self.transport.close()
+        self.recv_messages.close()
+
+    # asyncio.Protocol methods
+
+    # Connection callbacks
+
+    def connection_made(self, transport: asyncio.BaseTransport) -> None:
+        transport = cast(asyncio.Transport, transport)
+        self.recv_messages = Assembler(
+            *self.max_queue,
+            pause=transport.pause_reading,
+            resume=transport.resume_reading,
+        )
+        transport.set_write_buffer_limits(*self.write_limit)
+        self.transport = transport
+
+    def connection_lost(self, exc: Exception | None) -> None:
+        # Calling protocol.receive_eof() is safe because it's idempotent.
+        # This guarantees that the protocol state becomes CLOSED.
+        self.protocol.receive_eof()
+        assert self.protocol.state is CLOSED
+
+        self.set_recv_exc(exc)
+
+        # Abort recv() and pending pings with a ConnectionClosed exception.
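+        # (The order below matters: closing the assembler fails pending
+        # recv() calls, abort_pings() rejects outstanding pong waiters, and
+        # only then is the keepalive task cancelled and
+        # connection_lost_waiter resolved, which unblocks wait_closed().)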
+ self.recv_messages.close() + self.abort_pings() + + if self.keepalive_task is not None: + self.keepalive_task.cancel() + + # If self.connection_lost_waiter isn't pending, that's a bug, because: + # - it's set only here in connection_lost() which is called only once; + # - it must never be canceled. + self.connection_lost_waiter.set_result(None) + + # Adapted from asyncio.streams.FlowControlMixin + if self.paused: # pragma: no cover + self.paused = False + for waiter in self.drain_waiters: + if not waiter.done(): + if exc is None: + waiter.set_result(None) + else: + waiter.set_exception(exc) + + # Flow control callbacks + + def pause_writing(self) -> None: # pragma: no cover + # Adapted from asyncio.streams.FlowControlMixin + assert not self.paused + self.paused = True + + def resume_writing(self) -> None: # pragma: no cover + # Adapted from asyncio.streams.FlowControlMixin + assert self.paused + self.paused = False + for waiter in self.drain_waiters: + if not waiter.done(): + waiter.set_result(None) + + async def drain(self) -> None: # pragma: no cover + # We don't check if the connection is closed because we call drain() + # immediately after write() and write() would fail in that case. + + # Adapted from asyncio.streams.StreamWriter + # Yield to the event loop so that connection_lost() may be called. + if self.transport.is_closing(): + await asyncio.sleep(0) + + # Adapted from asyncio.streams.FlowControlMixin + if self.paused: + waiter = self.loop.create_future() + self.drain_waiters.append(waiter) + try: + await waiter + finally: + self.drain_waiters.remove(waiter) + + # Streaming protocol callbacks + + def data_received(self, data: bytes) -> None: + # Feed incoming data to the protocol. + self.protocol.receive_data(data) + + # This isn't expected to raise an exception. + events = self.protocol.events_received() + + # Write outgoing data to the transport. + try: + self.send_data() + except Exception as exc: + if self.debug: + self.logger.debug("error while sending data", exc_info=True) + self.set_recv_exc(exc) + + if self.protocol.close_expected(): + # If the connection is expected to close soon, set the + # close deadline based on the close timeout. + if self.close_timeout is not None: + if self.close_deadline is None: + self.close_deadline = self.loop.time() + self.close_timeout + + for event in events: + # This isn't expected to raise an exception. + self.process_event(event) + + def eof_received(self) -> None: + # Feed the end of the data stream to the connection. + self.protocol.receive_eof() + + # This isn't expected to generate events. + assert not self.protocol.events_received() + + # There is no error handling because send_data() can only write + # the end of the data stream here and it shouldn't raise errors. + self.send_data() + + # The WebSocket protocol has its own closing handshake: endpoints close + # the TCP or TLS connection after sending and receiving a close frame. + # As a consequence, they never need to write after receiving EOF, so + # there's no reason to keep the transport open by returning True. + # Besides, that doesn't work on TLS connections. + + +# broadcast() is defined in the connection module even though it's primarily +# used by servers and documented in the server module because it works with +# client connections too and because it's easier to test together with the +# Connection class. 
+
+
+def broadcast(
+    connections: Iterable[Connection],
+    message: Data,
+    raise_exceptions: bool = False,
+) -> None:
+    """
+    Broadcast a message to several WebSocket connections.
+
+    A string (:class:`str`) is sent as a Text_ frame. A bytestring or bytes-like
+    object (:class:`bytes`, :class:`bytearray`, or :class:`memoryview`) is sent
+    as a Binary_ frame.
+
+    .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6
+    .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6
+
+    :func:`broadcast` pushes the message synchronously to all connections even
+    if their write buffers are overflowing. There's no backpressure.
+
+    If you broadcast messages faster than a connection can handle them, messages
+    will pile up in its write buffer until the connection times out. Keep
+    ``ping_interval`` and ``ping_timeout`` low to prevent excessive memory usage
+    from slow connections.
+
+    Unlike :meth:`~websockets.asyncio.connection.Connection.send`,
+    :func:`broadcast` doesn't support sending fragmented messages. Indeed,
+    fragmentation is useful for sending large messages without buffering them in
+    memory, while :func:`broadcast` buffers one copy per connection as fast as
+    possible.
+
+    :func:`broadcast` skips connections that aren't open in order to avoid
+    errors on connections where the closing handshake is in progress.
+
+    :func:`broadcast` ignores failures to write the message on some connections.
+    It continues writing to other connections. On Python 3.11 and above, you may
+    set ``raise_exceptions`` to :obj:`True` to record failures and raise all
+    exceptions in a :pep:`654` :exc:`ExceptionGroup`.
+
+    While :func:`broadcast` makes more sense for servers, it works identically
+    with clients, if you have a use case for opening connections to many servers
+    and broadcasting a message to them.
+
+    Args:
+        connections: WebSocket connections to which the message will be sent.
+        message: Message to send.
+        raise_exceptions: Whether to raise an exception in case of failures.
+
+    Raises:
+        TypeError: If ``message`` doesn't have a supported type.
+
+    """
+    if isinstance(message, str):
+        send_method = "send_text"
+        message = message.encode()
+    elif isinstance(message, BytesLike):
+        send_method = "send_binary"
+    else:
+        raise TypeError("data must be str or bytes")
+
+    if raise_exceptions:
+        if sys.version_info[:2] < (3, 11):  # pragma: no cover
+            raise ValueError("raise_exceptions requires at least Python 3.11")
+        exceptions: list[Exception] = []
+
+    for connection in connections:
+        exception: Exception
+
+        if connection.protocol.state is not OPEN:
+            continue
+
+        if connection.fragmented_send_waiter is not None:
+            if raise_exceptions:
+                exception = ConcurrencyError("sending a fragmented message")
+                exceptions.append(exception)
+            else:
+                connection.logger.warning(
+                    "skipped broadcast: sending a fragmented message",
+                )
+            continue
+
+        try:
+            # Call connection.protocol.send_text or send_binary.
+            # Either way, message is already converted to bytes.
+            getattr(connection.protocol, send_method)(message)
+            connection.send_data()
+        except Exception as write_exception:
+            if raise_exceptions:
+                exception = RuntimeError("failed to write message")
+                exception.__cause__ = write_exception
+                exceptions.append(exception)
+            else:
+                connection.logger.warning(
+                    "skipped broadcast: failed to write message",
+                    exc_info=True,
+                )
+
+    if raise_exceptions and exceptions:
+        raise ExceptionGroup("skipped broadcast", exceptions)
+
+
+# Pretend that broadcast is actually defined in the server module.
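A minimal usage sketch (an editorial illustration, not part of the vendored
file): pushing one payload to every open connection of a server. It assumes
``server`` is the :class:`Server` returned by :func:`serve`; its
``connections`` property appears later in this diff.

    from websockets.asyncio.server import broadcast

    def announce(server, text: str) -> None:
        # broadcast() is synchronous and applies no backpressure; slow
        # connections buffer the payload until they time out.
        broadcast(server.connections, text)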
+broadcast.__module__ = "websockets.asyncio.server" diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/messages.py b/hackaton/lib/python3.12/site-packages/websockets/asyncio/messages.py new file mode 100644 index 0000000..c2b4afd --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/asyncio/messages.py @@ -0,0 +1,293 @@ +from __future__ import annotations + +import asyncio +import codecs +import collections +from typing import ( + Any, + AsyncIterator, + Callable, + Generic, + Iterable, + TypeVar, +) + +from ..exceptions import ConcurrencyError +from ..frames import OP_BINARY, OP_CONT, OP_TEXT, Frame +from ..typing import Data + + +__all__ = ["Assembler"] + +UTF8Decoder = codecs.getincrementaldecoder("utf-8") + +T = TypeVar("T") + + +class SimpleQueue(Generic[T]): + """ + Simplified version of :class:`asyncio.Queue`. + + Provides only the subset of functionality needed by :class:`Assembler`. + + """ + + def __init__(self) -> None: + self.loop = asyncio.get_running_loop() + self.get_waiter: asyncio.Future[None] | None = None + self.queue: collections.deque[T] = collections.deque() + + def __len__(self) -> int: + return len(self.queue) + + def put(self, item: T) -> None: + """Put an item into the queue without waiting.""" + self.queue.append(item) + if self.get_waiter is not None and not self.get_waiter.done(): + self.get_waiter.set_result(None) + + async def get(self) -> T: + """Remove and return an item from the queue, waiting if necessary.""" + if not self.queue: + if self.get_waiter is not None: + raise ConcurrencyError("get is already running") + self.get_waiter = self.loop.create_future() + try: + await self.get_waiter + finally: + self.get_waiter.cancel() + self.get_waiter = None + return self.queue.popleft() + + def reset(self, items: Iterable[T]) -> None: + """Put back items into an empty, idle queue.""" + assert self.get_waiter is None, "cannot reset() while get() is running" + assert not self.queue, "cannot reset() while queue isn't empty" + self.queue.extend(items) + + def abort(self) -> None: + if self.get_waiter is not None and not self.get_waiter.done(): + self.get_waiter.set_exception(EOFError("stream of frames ended")) + # Clear the queue to avoid storing unnecessary data in memory. + self.queue.clear() + + +class Assembler: + """ + Assemble messages from frames. + + :class:`Assembler` expects only data frames. The stream of frames must + respect the protocol; if it doesn't, the behavior is undefined. + + Args: + pause: Called when the buffer of frames goes above the high water mark; + should pause reading from the network. + resume: Called when the buffer of frames goes below the low water mark; + should resume reading from the network. + + """ + + # coverage reports incorrectly: "line NN didn't jump to the function exit" + def __init__( # pragma: no cover + self, + high: int = 16, + low: int | None = None, + pause: Callable[[], Any] = lambda: None, + resume: Callable[[], Any] = lambda: None, + ) -> None: + # Queue of incoming messages. Each item is a queue of frames. + self.frames: SimpleQueue[Frame] = SimpleQueue() + + # We cannot put a hard limit on the size of the queue because a single + # call to Protocol.data_received() could produce thousands of frames, + # which must be buffered. Instead, we pause reading when the buffer goes + # above the high limit and we resume when it goes under the low limit. 
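+        # (With the defaults, high=16 and low=high // 4=4: reading pauses
+        # once a 17th frame is buffered and resumes when four or fewer
+        # remain; see maybe_pause() and maybe_resume() below.)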
+ if low is None: + low = high // 4 + if low < 0: + raise ValueError("low must be positive or equal to zero") + if high < low: + raise ValueError("high must be greater than or equal to low") + self.high, self.low = high, low + self.pause = pause + self.resume = resume + self.paused = False + + # This flag prevents concurrent calls to get() by user code. + self.get_in_progress = False + + # This flag marks the end of the connection. + self.closed = False + + async def get(self, decode: bool | None = None) -> Data: + """ + Read the next message. + + :meth:`get` returns a single :class:`str` or :class:`bytes`. + + If the message is fragmented, :meth:`get` waits until the last frame is + received, then it reassembles the message and returns it. To receive + messages frame by frame, use :meth:`get_iter` instead. + + Args: + decode: :obj:`False` disables UTF-8 decoding of text frames and + returns :class:`bytes`. :obj:`True` forces UTF-8 decoding of + binary frames and returns :class:`str`. + + Raises: + EOFError: If the stream of frames has ended. + ConcurrencyError: If two coroutines run :meth:`get` or + :meth:`get_iter` concurrently. + + """ + if self.closed: + raise EOFError("stream of frames ended") + + if self.get_in_progress: + raise ConcurrencyError("get() or get_iter() is already running") + + # Locking with get_in_progress ensures only one coroutine can get here. + self.get_in_progress = True + + # First frame + try: + frame = await self.frames.get() + except asyncio.CancelledError: + self.get_in_progress = False + raise + self.maybe_resume() + assert frame.opcode is OP_TEXT or frame.opcode is OP_BINARY + if decode is None: + decode = frame.opcode is OP_TEXT + frames = [frame] + + # Following frames, for fragmented messages + while not frame.fin: + try: + frame = await self.frames.get() + except asyncio.CancelledError: + # Put frames already received back into the queue + # so that future calls to get() can return them. + self.frames.reset(frames) + self.get_in_progress = False + raise + self.maybe_resume() + assert frame.opcode is OP_CONT + frames.append(frame) + + self.get_in_progress = False + + data = b"".join(frame.data for frame in frames) + if decode: + return data.decode() + else: + return data + + async def get_iter(self, decode: bool | None = None) -> AsyncIterator[Data]: + """ + Stream the next message. + + Iterating the return value of :meth:`get_iter` asynchronously yields a + :class:`str` or :class:`bytes` for each frame in the message. + + The iterator must be fully consumed before calling :meth:`get_iter` or + :meth:`get` again. Else, :exc:`ConcurrencyError` is raised. + + This method only makes sense for fragmented messages. If messages aren't + fragmented, use :meth:`get` instead. + + Args: + decode: :obj:`False` disables UTF-8 decoding of text frames and + returns :class:`bytes`. :obj:`True` forces UTF-8 decoding of + binary frames and returns :class:`str`. + + Raises: + EOFError: If the stream of frames has ended. + ConcurrencyError: If two coroutines run :meth:`get` or + :meth:`get_iter` concurrently. + + """ + if self.closed: + raise EOFError("stream of frames ended") + + if self.get_in_progress: + raise ConcurrencyError("get() or get_iter() is already running") + + # Locking with get_in_progress ensures only one coroutine can get here. 
+        self.get_in_progress = True
+
+        # First frame
+        try:
+            frame = await self.frames.get()
+        except asyncio.CancelledError:
+            self.get_in_progress = False
+            raise
+        self.maybe_resume()
+        assert frame.opcode is OP_TEXT or frame.opcode is OP_BINARY
+        if decode is None:
+            decode = frame.opcode is OP_TEXT
+        if decode:
+            decoder = UTF8Decoder()
+            yield decoder.decode(frame.data, frame.fin)
+        else:
+            yield frame.data
+
+        # Following frames, for fragmented messages
+        while not frame.fin:
+            # We cannot handle asyncio.CancelledError because we don't buffer
+            # previous fragments; we're streaming them. Canceling get_iter()
+            # here will leave the assembler in a stuck state. Future calls to
+            # get() or get_iter() will raise ConcurrencyError.
+            frame = await self.frames.get()
+            self.maybe_resume()
+            assert frame.opcode is OP_CONT
+            if decode:
+                yield decoder.decode(frame.data, frame.fin)
+            else:
+                yield frame.data
+
+        self.get_in_progress = False
+
+    def put(self, frame: Frame) -> None:
+        """
+        Add ``frame`` to the next message.
+
+        Raises:
+            EOFError: If the stream of frames has ended.
+
+        """
+        if self.closed:
+            raise EOFError("stream of frames ended")
+
+        self.frames.put(frame)
+        self.maybe_pause()
+
+    def maybe_pause(self) -> None:
+        """Pause the writer if queue is above the high water mark."""
+        # Check for "> high" to support high = 0
+        if len(self.frames) > self.high and not self.paused:
+            self.paused = True
+            self.pause()
+
+    def maybe_resume(self) -> None:
+        """Resume the writer if queue is below the low water mark."""
+        # Check for "<= low" to support low = 0
+        if len(self.frames) <= self.low and self.paused:
+            self.paused = False
+            self.resume()
+
+    def close(self) -> None:
+        """
+        End the stream of frames.
+
+        Calling :meth:`close` concurrently with :meth:`get`, :meth:`get_iter`,
+        or :meth:`put` is safe. They will raise :exc:`EOFError`.
+
+        """
+        if self.closed:
+            return
+
+        self.closed = True
+
+        # Unblock get() or get_iter().
+        self.frames.abort()
diff --git a/hackaton/lib/python3.12/site-packages/websockets/asyncio/server.py b/hackaton/lib/python3.12/site-packages/websockets/asyncio/server.py
new file mode 100644
index 0000000..19dae44
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/websockets/asyncio/server.py
@@ -0,0 +1,973 @@
+from __future__ import annotations
+
+import asyncio
+import hmac
+import http
+import logging
+import socket
+import sys
+from types import TracebackType
+from typing import (
+    Any,
+    Awaitable,
+    Callable,
+    Generator,
+    Iterable,
+    Sequence,
+    Tuple,
+    cast,
+)
+
+from ..exceptions import InvalidHeader
+from ..extensions.base import ServerExtensionFactory
+from ..extensions.permessage_deflate import enable_server_permessage_deflate
+from ..frames import CloseCode
+from ..headers import (
+    build_www_authenticate_basic,
+    parse_authorization_basic,
+    validate_subprotocols,
+)
+from ..http11 import SERVER, Request, Response
+from ..protocol import CONNECTING, OPEN, Event
+from ..server import ServerProtocol
+from ..typing import LoggerLike, Origin, StatusLike, Subprotocol
+from .compatibility import asyncio_timeout
+from .connection import Connection, broadcast
+
+
+__all__ = [
+    "broadcast",
+    "serve",
+    "unix_serve",
+    "ServerConnection",
+    "Server",
+    "basic_auth",
+]
+
+
+class ServerConnection(Connection):
+    """
+    :mod:`asyncio` implementation of a WebSocket server connection.
+
+    :class:`ServerConnection` provides :meth:`recv` and :meth:`send` methods for
+    receiving and sending messages.
+
+    It supports asynchronous iteration to receive messages::
+
+        async for message in websocket:
+            await process(message)
+
+    The iterator exits normally when the connection is closed with close code
+    1000 (OK) or 1001 (going away) or without a close code. It raises a
+    :exc:`~websockets.exceptions.ConnectionClosedError` when the connection is
+    closed with any other code.
+
+    The ``ping_interval``, ``ping_timeout``, ``close_timeout``, ``max_queue``,
+    and ``write_limit`` arguments have the same meaning as in :func:`serve`.
+
+    Args:
+        protocol: Sans-I/O connection.
+        server: Server that manages this connection.
+
+    """
+
+    def __init__(
+        self,
+        protocol: ServerProtocol,
+        server: Server,
+        *,
+        ping_interval: float | None = 20,
+        ping_timeout: float | None = 20,
+        close_timeout: float | None = 10,
+        max_queue: int | tuple[int, int | None] = 16,
+        write_limit: int | tuple[int, int | None] = 2**15,
+    ) -> None:
+        self.protocol: ServerProtocol
+        super().__init__(
+            protocol,
+            ping_interval=ping_interval,
+            ping_timeout=ping_timeout,
+            close_timeout=close_timeout,
+            max_queue=max_queue,
+            write_limit=write_limit,
+        )
+        self.server = server
+        self.request_rcvd: asyncio.Future[None] = self.loop.create_future()
+        self.username: str  # see basic_auth()
+
+    def respond(self, status: StatusLike, text: str) -> Response:
+        """
+        Create a plain text HTTP response.
+
+        ``process_request`` and ``process_response`` may call this method to
+        return an HTTP response instead of performing the WebSocket opening
+        handshake.
+
+        You can modify the response before returning it, for example by changing
+        HTTP headers.
+
+        Args:
+            status: HTTP status code.
+            text: HTTP response body; it will be encoded to UTF-8.
+
+        Returns:
+            HTTP response to send to the client.
+
+        """
+        return self.protocol.reject(status, text)
+
+    async def handshake(
+        self,
+        process_request: (
+            Callable[
+                [ServerConnection, Request],
+                Awaitable[Response | None] | Response | None,
+            ]
+            | None
+        ) = None,
+        process_response: (
+            Callable[
+                [ServerConnection, Request, Response],
+                Awaitable[Response | None] | Response | None,
+            ]
+            | None
+        ) = None,
+        server_header: str | None = SERVER,
+    ) -> None:
+        """
+        Perform the opening handshake.
+
+        """
+        await asyncio.wait(
+            [self.request_rcvd, self.connection_lost_waiter],
+            return_when=asyncio.FIRST_COMPLETED,
+        )
+
+        if self.request is not None:
+            async with self.send_context(expected_state=CONNECTING):
+                response = None
+
+                if process_request is not None:
+                    try:
+                        response = process_request(self, self.request)
+                        if isinstance(response, Awaitable):
+                            response = await response
+                    except Exception as exc:
+                        self.protocol.handshake_exc = exc
+                        response = self.protocol.reject(
+                            http.HTTPStatus.INTERNAL_SERVER_ERROR,
+                            (
+                                "Failed to open a WebSocket connection.\n"
+                                "See server log for more information.\n"
+                            ),
+                        )
+
+                if response is None:
+                    if self.server.is_serving():
+                        self.response = self.protocol.accept(self.request)
+                    else:
+                        self.response = self.protocol.reject(
+                            http.HTTPStatus.SERVICE_UNAVAILABLE,
+                            "Server is shutting down.\n",
+                        )
+                else:
+                    assert isinstance(response, Response)  # help mypy
+                    self.response = response
+
+                if server_header:
+                    self.response.headers["Server"] = server_header
+
+                response = None
+
+                if process_response is not None:
+                    try:
+                        response = process_response(self, self.request, self.response)
+                        if isinstance(response, Awaitable):
+                            response = await response
+                    except Exception as exc:
+                        self.protocol.handshake_exc = exc
+                        response = self.protocol.reject(
+                            http.HTTPStatus.INTERNAL_SERVER_ERROR,
+                            (
+                                "Failed to open a WebSocket connection.\n"
+                                "See server log for more information.\n"
+                            ),
+                        )
+
+                if response is not None:
+                    assert isinstance(response, Response)  # help mypy
+                    self.response = response
+
+                self.protocol.send_response(self.response)
+
+        # self.protocol.handshake_exc is always set when the connection is lost
+        # before receiving a request, when the request cannot be parsed, when
+        # the handshake encounters an error, or when process_request or
+        # process_response sends an HTTP response that rejects the handshake.
+
+        if self.protocol.handshake_exc is not None:
+            raise self.protocol.handshake_exc
+
+    def process_event(self, event: Event) -> None:
+        """
+        Process one incoming event.
+
+        """
+        # First event - handshake request.
+        if self.request is None:
+            assert isinstance(event, Request)
+            self.request = event
+            self.request_rcvd.set_result(None)
+        # Later events - frames.
+        else:
+            super().process_event(event)
+
+    def connection_made(self, transport: asyncio.BaseTransport) -> None:
+        super().connection_made(transport)
+        self.server.start_connection_handler(self)
+
+
+class Server:
+    """
+    WebSocket server returned by :func:`serve`.
+
+    This class mirrors the API of :class:`asyncio.Server`.
+
+    It keeps track of WebSocket connections in order to close them properly
+    when shutting down.
+
+    Args:
+        handler: Connection handler. It receives the WebSocket connection,
+            which is a :class:`ServerConnection`, in argument.
+        process_request: Intercept the request during the opening handshake.
+            Return an HTTP response to force the response. Return :obj:`None` to
+            continue normally. When you force an HTTP 101 Switching Protocols
+            response, the handshake is successful. Else, the connection is
+            aborted. ``process_request`` may be a function or a coroutine.
+        process_response: Intercept the response during the opening handshake.
+            Modify the response or return a new HTTP response to force the
+            response. Return :obj:`None` to continue normally. When you force an
+            HTTP 101 Switching Protocols response, the handshake is successful.
+            Else, the connection is aborted. ``process_response`` may be a
+            function or a coroutine.
+ server_header: Value of the ``Server`` response header. + It defaults to ``"Python/x.y.z websockets/X.Y"``. Setting it to + :obj:`None` removes the header. + open_timeout: Timeout for opening connections in seconds. + :obj:`None` disables the timeout. + logger: Logger for this server. + It defaults to ``logging.getLogger("websockets.server")``. + See the :doc:`logging guide <../../topics/logging>` for details. + + """ + + def __init__( + self, + handler: Callable[[ServerConnection], Awaitable[None]], + *, + process_request: ( + Callable[ + [ServerConnection, Request], + Awaitable[Response | None] | Response | None, + ] + | None + ) = None, + process_response: ( + Callable[ + [ServerConnection, Request, Response], + Awaitable[Response | None] | Response | None, + ] + | None + ) = None, + server_header: str | None = SERVER, + open_timeout: float | None = 10, + logger: LoggerLike | None = None, + ) -> None: + self.loop = asyncio.get_running_loop() + self.handler = handler + self.process_request = process_request + self.process_response = process_response + self.server_header = server_header + self.open_timeout = open_timeout + if logger is None: + logger = logging.getLogger("websockets.server") + self.logger = logger + + # Keep track of active connections. + self.handlers: dict[ServerConnection, asyncio.Task[None]] = {} + + # Task responsible for closing the server and terminating connections. + self.close_task: asyncio.Task[None] | None = None + + # Completed when the server is closed and connections are terminated. + self.closed_waiter: asyncio.Future[None] = self.loop.create_future() + + @property + def connections(self) -> set[ServerConnection]: + """ + Set of active connections. + + This property contains all connections that completed the opening + handshake successfully and didn't start the closing handshake yet. + It can be useful in combination with :func:`~broadcast`. + + """ + return {connection for connection in self.handlers if connection.state is OPEN} + + def wrap(self, server: asyncio.Server) -> None: + """ + Attach to a given :class:`asyncio.Server`. + + Since :meth:`~asyncio.loop.create_server` doesn't support injecting a + custom ``Server`` class, the easiest solution that doesn't rely on + private :mod:`asyncio` APIs is to: + + - instantiate a :class:`Server` + - give the protocol factory a reference to that instance + - call :meth:`~asyncio.loop.create_server` with the factory + - attach the resulting :class:`asyncio.Server` with this method + + """ + self.server = server + for sock in server.sockets: + if sock.family == socket.AF_INET: + name = "%s:%d" % sock.getsockname() + elif sock.family == socket.AF_INET6: + name = "[%s]:%d" % sock.getsockname()[:2] + elif sock.family == socket.AF_UNIX: + name = sock.getsockname() + # In the unlikely event that someone runs websockets over a + # protocol other than IP or Unix sockets, avoid crashing. + else: # pragma: no cover + name = str(sock.getsockname()) + self.logger.info("server listening on %s", name) + + async def conn_handler(self, connection: ServerConnection) -> None: + """ + Handle the lifecycle of a WebSocket connection. + + Since this method doesn't have a caller that can handle exceptions, + it attempts to log relevant ones. + + It guarantees that the TCP connection is closed before exiting. 
+ + """ + try: + async with asyncio_timeout(self.open_timeout): + try: + await connection.handshake( + self.process_request, + self.process_response, + self.server_header, + ) + except asyncio.CancelledError: + connection.close_transport() + raise + except Exception: + connection.logger.error("opening handshake failed", exc_info=True) + connection.close_transport() + return + + assert connection.protocol.state is OPEN + try: + connection.start_keepalive() + await self.handler(connection) + except Exception: + connection.logger.error("connection handler failed", exc_info=True) + await connection.close(CloseCode.INTERNAL_ERROR) + else: + await connection.close() + + except TimeoutError: + # When the opening handshake times out, there's nothing to log. + pass + + except Exception: # pragma: no cover + # Don't leak connections on unexpected errors. + connection.transport.abort() + + finally: + # Registration is tied to the lifecycle of conn_handler() because + # the server waits for connection handlers to terminate, even if + # all connections are already closed. + del self.handlers[connection] + + def start_connection_handler(self, connection: ServerConnection) -> None: + """ + Register a connection with this server. + + """ + # The connection must be registered in self.handlers immediately. + # If it was registered in conn_handler(), a race condition could + # happen when closing the server after scheduling conn_handler() + # but before it starts executing. + self.handlers[connection] = self.loop.create_task(self.conn_handler(connection)) + + def close(self, close_connections: bool = True) -> None: + """ + Close the server. + + * Close the underlying :class:`asyncio.Server`. + * When ``close_connections`` is :obj:`True`, which is the default, + close existing connections. Specifically: + + * Reject opening WebSocket connections with an HTTP 503 (service + unavailable) error. This happens when the server accepted the TCP + connection but didn't complete the opening handshake before closing. + * Close open WebSocket connections with close code 1001 (going away). + + * Wait until all connection handlers terminate. + + :meth:`close` is idempotent. + + """ + if self.close_task is None: + self.close_task = self.get_loop().create_task( + self._close(close_connections) + ) + + async def _close(self, close_connections: bool) -> None: + """ + Implementation of :meth:`close`. + + This calls :meth:`~asyncio.Server.close` on the underlying + :class:`asyncio.Server` object to stop accepting new connections and + then closes open connections with close code 1001. + + """ + self.logger.info("server closing") + + # Stop accepting new connections. + self.server.close() + + # Wait until all accepted connections reach connection_made() and call + # register(). See https://github.com/python/cpython/issues/79033 for + # details. This workaround can be removed when dropping Python < 3.11. + await asyncio.sleep(0) + + if close_connections: + # Close OPEN connections with close code 1001. After server.close(), + # handshake() closes OPENING connections with an HTTP 503 error. + close_tasks = [ + asyncio.create_task(connection.close(1001)) + for connection in self.handlers + if connection.protocol.state is not CONNECTING + ] + # asyncio.wait doesn't accept an empty first argument. + if close_tasks: + await asyncio.wait(close_tasks) + + # Wait until all TCP connections are closed. + await self.server.wait_closed() + + # Wait until all connection handlers terminate. 
+        # asyncio.wait doesn't accept an empty first argument.
+        if self.handlers:
+            await asyncio.wait(self.handlers.values())
+
+        # Tell wait_closed() to return.
+        self.closed_waiter.set_result(None)
+
+        self.logger.info("server closed")
+
+    async def wait_closed(self) -> None:
+        """
+        Wait until the server is closed.
+
+        When :meth:`wait_closed` returns, all TCP connections are closed and
+        all connection handlers have returned.
+
+        To ensure a fast shutdown, a connection handler should always be
+        awaiting at least one of:
+
+        * :meth:`~ServerConnection.recv`: when the connection is closed,
+          it raises :exc:`~websockets.exceptions.ConnectionClosedOK`;
+        * :meth:`~ServerConnection.wait_closed`: when the connection is
+          closed, it returns.
+
+        Then the connection handler is immediately notified of the shutdown;
+        it can clean up and exit.
+
+        """
+        await asyncio.shield(self.closed_waiter)
+
+    def get_loop(self) -> asyncio.AbstractEventLoop:
+        """
+        See :meth:`asyncio.Server.get_loop`.
+
+        """
+        return self.server.get_loop()
+
+    def is_serving(self) -> bool:  # pragma: no cover
+        """
+        See :meth:`asyncio.Server.is_serving`.
+
+        """
+        return self.server.is_serving()
+
+    async def start_serving(self) -> None:  # pragma: no cover
+        """
+        See :meth:`asyncio.Server.start_serving`.
+
+        Typical use::
+
+            server = await serve(..., start_serving=False)
+            # perform additional setup here...
+            # ... then start the server
+            await server.start_serving()
+
+        """
+        await self.server.start_serving()
+
+    async def serve_forever(self) -> None:  # pragma: no cover
+        """
+        See :meth:`asyncio.Server.serve_forever`.
+
+        Typical use::
+
+            server = await serve(...)
+            # this coroutine doesn't return
+            # canceling it stops the server
+            await server.serve_forever()
+
+        This is an alternative to using :func:`serve` as an asynchronous context
+        manager. Shutdown is triggered by canceling :meth:`serve_forever`
+        instead of exiting a :func:`serve` context.
+
+        """
+        await self.server.serve_forever()
+
+    @property
+    def sockets(self) -> Iterable[socket.socket]:
+        """
+        See :attr:`asyncio.Server.sockets`.
+
+        """
+        return self.server.sockets
+
+    async def __aenter__(self) -> Server:  # pragma: no cover
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:  # pragma: no cover
+        self.close()
+        await self.wait_closed()
+
+
+# This is spelled in lower case because it's exposed as a callable in the API.
+class serve:
+    """
+    Create a WebSocket server listening on ``host`` and ``port``.
+
+    Whenever a client connects, the server creates a :class:`ServerConnection`,
+    performs the opening handshake, and delegates to the ``handler`` coroutine.
+
+    The handler receives the :class:`ServerConnection` instance, which you can
+    use to send and receive messages.
+
+    Once the handler completes, either normally or with an exception, the server
+    performs the closing handshake and closes the connection.
+
+    This coroutine returns a :class:`Server` whose API mirrors
+    :class:`asyncio.Server`. Treat it as an asynchronous context manager to
+    ensure that the server will be closed::
+
+        from websockets.asyncio.server import serve
+
+        async def handler(websocket):
+            ...
+
+        # set this future to exit the server
+        stop = asyncio.get_running_loop().create_future()
+
+        async with serve(handler, host, port):
+            await stop
+
+    Alternatively, call :meth:`~Server.serve_forever` to serve requests and
+    cancel it to stop the server::
+
+        server = await serve(handler, host, port)
+        await server.serve_forever()
+
+    Args:
+        handler: Connection handler. It receives the WebSocket connection,
+            which is a :class:`ServerConnection`, in argument.
+        host: Network interfaces the server binds to.
+            See :meth:`~asyncio.loop.create_server` for details.
+        port: TCP port the server listens on.
+            See :meth:`~asyncio.loop.create_server` for details.
+        origins: Acceptable values of the ``Origin`` header, for defending
+            against Cross-Site WebSocket Hijacking attacks. Include :obj:`None`
+            in the list if the lack of an origin is acceptable.
+        extensions: List of supported extensions, in the order in which they
+            should be negotiated and run.
+        subprotocols: List of supported subprotocols, in order of decreasing
+            preference.
+        select_subprotocol: Callback for selecting a subprotocol among
+            those supported by the client and the server. It receives a
+            :class:`ServerConnection` (not a
+            :class:`~websockets.server.ServerProtocol`!) instance and a list of
+            subprotocols offered by the client. Other than the first argument,
+            it has the same behavior as the
+            :meth:`ServerProtocol.select_subprotocol
+            <websockets.server.ServerProtocol.select_subprotocol>` method.
+        process_request: Intercept the request during the opening handshake.
+            Return an HTTP response to force the response or :obj:`None` to
+            continue normally. When you force an HTTP 101 Switching Protocols
+            response, the handshake is successful. Else, the connection is
+            aborted. ``process_request`` may be a function or a coroutine.
+        process_response: Intercept the response during the opening handshake.
+            Return an HTTP response to force the response or :obj:`None` to
+            continue normally. When you force an HTTP 101 Switching Protocols
+            response, the handshake is successful. Else, the connection is
+            aborted. ``process_response`` may be a function or a coroutine.
+        server_header: Value of the ``Server`` response header.
+            It defaults to ``"Python/x.y.z websockets/X.Y"``. Setting it to
+            :obj:`None` removes the header.
+        compression: The "permessage-deflate" extension is enabled by default.
+            Set ``compression`` to :obj:`None` to disable it. See the
+            :doc:`compression guide <../../topics/compression>` for details.
+        open_timeout: Timeout for opening connections in seconds.
+            :obj:`None` disables the timeout.
+        ping_interval: Interval between keepalive pings in seconds.
+            :obj:`None` disables keepalive.
+        ping_timeout: Timeout for keepalive pings in seconds.
+            :obj:`None` disables timeouts.
+        close_timeout: Timeout for closing connections in seconds.
+            :obj:`None` disables the timeout.
+        max_size: Maximum size of incoming messages in bytes.
+            :obj:`None` disables the limit.
+        max_queue: High-water mark of the buffer where frames are received.
+            It defaults to 16 frames. The low-water mark defaults to ``max_queue
+            // 4``. You may pass a ``(high, low)`` tuple to set the high-water
+            and low-water marks.
+        write_limit: High-water mark of write buffer in bytes. It is passed to
+            :meth:`~asyncio.WriteTransport.set_write_buffer_limits`. It defaults
+            to 32 KiB. You may pass a ``(high, low)`` tuple to set the
+            high-water and low-water marks.
+        logger: Logger for this server.
+            It defaults to ``logging.getLogger("websockets.server")``. See the
+            :doc:`logging guide <../../topics/logging>` for details.
+ create_connection: Factory for the :class:`ServerConnection` managing + the connection. Set it to a wrapper or a subclass to customize + connection handling. + + Any other keyword arguments are passed to the event loop's + :meth:`~asyncio.loop.create_server` method. + + For example: + + * You can set ``ssl`` to a :class:`~ssl.SSLContext` to enable TLS. + + * You can set ``sock`` to provide a preexisting TCP socket. You may call + :func:`socket.create_server` (not to be confused with the event loop's + :meth:`~asyncio.loop.create_server` method) to create a suitable server + socket and customize it. + + * You can set ``start_serving`` to ``False`` to start accepting connections + only after you call :meth:`~Server.start_serving()` or + :meth:`~Server.serve_forever()`. + + """ + + def __init__( + self, + handler: Callable[[ServerConnection], Awaitable[None]], + host: str | None = None, + port: int | None = None, + *, + # WebSocket + origins: Sequence[Origin | None] | None = None, + extensions: Sequence[ServerExtensionFactory] | None = None, + subprotocols: Sequence[Subprotocol] | None = None, + select_subprotocol: ( + Callable[ + [ServerConnection, Sequence[Subprotocol]], + Subprotocol | None, + ] + | None + ) = None, + process_request: ( + Callable[ + [ServerConnection, Request], + Awaitable[Response | None] | Response | None, + ] + | None + ) = None, + process_response: ( + Callable[ + [ServerConnection, Request, Response], + Awaitable[Response | None] | Response | None, + ] + | None + ) = None, + server_header: str | None = SERVER, + compression: str | None = "deflate", + # Timeouts + open_timeout: float | None = 10, + ping_interval: float | None = 20, + ping_timeout: float | None = 20, + close_timeout: float | None = 10, + # Limits + max_size: int | None = 2**20, + max_queue: int | tuple[int, int | None] = 16, + write_limit: int | tuple[int, int | None] = 2**15, + # Logging + logger: LoggerLike | None = None, + # Escape hatch for advanced customization + create_connection: type[ServerConnection] | None = None, + # Other keyword arguments are passed to loop.create_server + **kwargs: Any, + ) -> None: + if subprotocols is not None: + validate_subprotocols(subprotocols) + + if compression == "deflate": + extensions = enable_server_permessage_deflate(extensions) + elif compression is not None: + raise ValueError(f"unsupported compression: {compression}") + + if create_connection is None: + create_connection = ServerConnection + + self.server = Server( + handler, + process_request=process_request, + process_response=process_response, + server_header=server_header, + open_timeout=open_timeout, + logger=logger, + ) + + if kwargs.get("ssl") is not None: + kwargs.setdefault("ssl_handshake_timeout", open_timeout) + if sys.version_info[:2] >= (3, 11): # pragma: no branch + kwargs.setdefault("ssl_shutdown_timeout", close_timeout) + + def factory() -> ServerConnection: + """ + Create an asyncio protocol for managing a WebSocket connection. + + """ + # Create a closure to give select_subprotocol access to connection. + protocol_select_subprotocol: ( + Callable[ + [ServerProtocol, Sequence[Subprotocol]], + Subprotocol | None, + ] + | None + ) = None + if select_subprotocol is not None: + + def protocol_select_subprotocol( + protocol: ServerProtocol, + subprotocols: Sequence[Subprotocol], + ) -> Subprotocol | None: + # mypy doesn't know that select_subprotocol is immutable. + assert select_subprotocol is not None + # Ensure this function is only used in the intended context. 
+                    assert protocol is connection.protocol
+                    return select_subprotocol(connection, subprotocols)
+
+            # This is a protocol in the Sans-I/O implementation of websockets.
+            protocol = ServerProtocol(
+                origins=origins,
+                extensions=extensions,
+                subprotocols=subprotocols,
+                select_subprotocol=protocol_select_subprotocol,
+                max_size=max_size,
+                logger=logger,
+            )
+            # This is a connection in websockets and a protocol in asyncio.
+            connection = create_connection(
+                protocol,
+                self.server,
+                ping_interval=ping_interval,
+                ping_timeout=ping_timeout,
+                close_timeout=close_timeout,
+                max_queue=max_queue,
+                write_limit=write_limit,
+            )
+            return connection
+
+        loop = asyncio.get_running_loop()
+        if kwargs.pop("unix", False):
+            self.create_server = loop.create_unix_server(factory, **kwargs)
+        else:
+            # mypy cannot tell that kwargs must provide sock when port is None.
+            self.create_server = loop.create_server(factory, host, port, **kwargs)  # type: ignore[arg-type]
+
+    # async with serve(...) as ...: ...
+
+    async def __aenter__(self) -> Server:
+        return await self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        self.server.close()
+        await self.server.wait_closed()
+
+    # ... = await serve(...)
+
+    def __await__(self) -> Generator[Any, None, Server]:
+        # Create a suitable iterator by calling __await__ on a coroutine.
+        return self.__await_impl__().__await__()
+
+    async def __await_impl__(self) -> Server:
+        server = await self.create_server
+        self.server.wrap(server)
+        return self.server
+
+    # ... = yield from serve(...) - remove when dropping Python < 3.10
+
+    __iter__ = __await__
+
+
+def unix_serve(
+    handler: Callable[[ServerConnection], Awaitable[None]],
+    path: str | None = None,
+    **kwargs: Any,
+) -> Awaitable[Server]:
+    """
+    Create a WebSocket server listening on a Unix socket.
+
+    This function is identical to :func:`serve`, except the ``host`` and
+    ``port`` arguments are replaced by ``path``. It's only available on Unix.
+
+    It's useful for deploying a server behind a reverse proxy such as nginx.
+
+    Args:
+        handler: Connection handler. It receives the WebSocket connection,
+            which is a :class:`ServerConnection`, in argument.
+        path: File system path to the Unix socket.
+
+    """
+    return serve(handler, unix=True, path=path, **kwargs)
+
+
+def is_credentials(credentials: Any) -> bool:
+    try:
+        username, password = credentials
+    except (TypeError, ValueError):
+        return False
+    else:
+        return isinstance(username, str) and isinstance(password, str)
+
+
+def basic_auth(
+    realm: str = "",
+    credentials: tuple[str, str] | Iterable[tuple[str, str]] | None = None,
+    check_credentials: Callable[[str, str], Awaitable[bool] | bool] | None = None,
+) -> Callable[[ServerConnection, Request], Awaitable[Response | None]]:
+    """
+    Factory for ``process_request`` to enforce HTTP Basic Authentication.
+
+    :func:`basic_auth` is designed to integrate with :func:`serve` as follows::
+
+        from websockets.asyncio.server import basic_auth, serve
+
+        async with serve(
+            ...,
+            process_request=basic_auth(
+                realm="my dev server",
+                credentials=("hello", "iloveyou"),
+            ),
+        ):
+            ...
+
+    If authentication succeeds, the connection's ``username`` attribute is set.
+    If it fails, the server responds with an HTTP 401 Unauthorized status.
+
+    One of ``credentials`` or ``check_credentials`` must be provided; not both.
+
+    Args:
+        realm: Scope of protection.
+            It should contain only ASCII characters because the encoding of
+            non-ASCII characters is undefined. Refer to section 2.2 of
+            :rfc:`7235` for details.
+        credentials: Hard coded authorized credentials. It can be a
+            ``(username, password)`` pair or a list of such pairs.
+        check_credentials: Function or coroutine that verifies credentials.
+            It receives ``username`` and ``password`` arguments and returns
+            whether they're valid.
+
+    Raises:
+        TypeError: If ``credentials`` or ``check_credentials`` is wrong.
+
+    """
+    if (credentials is None) == (check_credentials is None):
+        raise TypeError("provide either credentials or check_credentials")
+
+    if credentials is not None:
+        if is_credentials(credentials):
+            credentials_list = [cast(Tuple[str, str], credentials)]
+        elif isinstance(credentials, Iterable):
+            credentials_list = list(cast(Iterable[Tuple[str, str]], credentials))
+            if not all(is_credentials(item) for item in credentials_list):
+                raise TypeError(f"invalid credentials argument: {credentials}")
+        else:
+            raise TypeError(f"invalid credentials argument: {credentials}")
+
+        credentials_dict = dict(credentials_list)
+
+        def check_credentials(username: str, password: str) -> bool:
+            try:
+                expected_password = credentials_dict[username]
+            except KeyError:
+                return False
+            return hmac.compare_digest(expected_password, password)
+
+    assert check_credentials is not None  # help mypy
+
+    async def process_request(
+        connection: ServerConnection,
+        request: Request,
+    ) -> Response | None:
+        """
+        Perform HTTP Basic Authentication.
+
+        If it succeeds, set the connection's ``username`` attribute and return
+        :obj:`None`. If it fails, return an HTTP 401 Unauthorized response.
+
+        """
+        try:
+            authorization = request.headers["Authorization"]
+        except KeyError:
+            response = connection.respond(
+                http.HTTPStatus.UNAUTHORIZED,
+                "Missing credentials\n",
+            )
+            response.headers["WWW-Authenticate"] = build_www_authenticate_basic(realm)
+            return response
+
+        try:
+            username, password = parse_authorization_basic(authorization)
+        except InvalidHeader:
+            response = connection.respond(
+                http.HTTPStatus.UNAUTHORIZED,
+                "Unsupported credentials\n",
+            )
+            response.headers["WWW-Authenticate"] = build_www_authenticate_basic(realm)
+            return response
+
+        valid_credentials = check_credentials(username, password)
+        if isinstance(valid_credentials, Awaitable):
+            valid_credentials = await valid_credentials
+
+        if not valid_credentials:
+            response = connection.respond(
+                http.HTTPStatus.UNAUTHORIZED,
+                "Invalid credentials\n",
+            )
+            response.headers["WWW-Authenticate"] = build_www_authenticate_basic(realm)
+            return response
+
+        connection.username = username
+        return None
+
+    return process_request
diff --git a/hackaton/lib/python3.12/site-packages/websockets/auth.py b/hackaton/lib/python3.12/site-packages/websockets/auth.py
new file mode 100644
index 0000000..b792e02
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/websockets/auth.py
@@ -0,0 +1,6 @@
+from __future__ import annotations
+
+# See #940 for why lazy_import isn't used here for backwards compatibility.
+# See #1400 for why listing compatibility imports in __all__ helps PyCharm.
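+# This module re-exports the legacy implementation so that existing imports
+# of ``websockets.auth`` keep working.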
+from .legacy.auth import * +from .legacy.auth import __all__ # noqa: F401 diff --git a/hackaton/lib/python3.12/site-packages/websockets/client.py b/hackaton/lib/python3.12/site-packages/websockets/client.py new file mode 100644 index 0000000..e5f2949 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/client.py @@ -0,0 +1,393 @@ +from __future__ import annotations + +import os +import random +import warnings +from typing import Any, Generator, Sequence + +from .datastructures import Headers, MultipleValuesError +from .exceptions import ( + InvalidHandshake, + InvalidHeader, + InvalidHeaderValue, + InvalidStatus, + InvalidUpgrade, + NegotiationError, +) +from .extensions import ClientExtensionFactory, Extension +from .headers import ( + build_authorization_basic, + build_extension, + build_host, + build_subprotocol, + parse_connection, + parse_extension, + parse_subprotocol, + parse_upgrade, +) +from .http11 import Request, Response +from .protocol import CLIENT, CONNECTING, OPEN, Protocol, State +from .typing import ( + ConnectionOption, + ExtensionHeader, + LoggerLike, + Origin, + Subprotocol, + UpgradeProtocol, +) +from .uri import WebSocketURI +from .utils import accept_key, generate_key + + +# See #940 for why lazy_import isn't used here for backwards compatibility. +# See #1400 for why listing compatibility imports in __all__ helps PyCharm. +from .legacy.client import * # isort:skip # noqa: I001 +from .legacy.client import __all__ as legacy__all__ + + +__all__ = ["ClientProtocol"] + legacy__all__ + + +class ClientProtocol(Protocol): + """ + Sans-I/O implementation of a WebSocket client connection. + + Args: + wsuri: URI of the WebSocket server, parsed + with :func:`~websockets.uri.parse_uri`. + origin: Value of the ``Origin`` header. This is useful when connecting + to a server that validates the ``Origin`` header to defend against + Cross-Site WebSocket Hijacking attacks. + extensions: List of supported extensions, in order in which they + should be tried. + subprotocols: List of supported subprotocols, in order of decreasing + preference. + state: Initial state of the WebSocket connection. + max_size: Maximum size of incoming messages in bytes; + :obj:`None` disables the limit. + logger: Logger for this connection; + defaults to ``logging.getLogger("websockets.client")``; + see the :doc:`logging guide <../../topics/logging>` for details. + + """ + + def __init__( + self, + wsuri: WebSocketURI, + *, + origin: Origin | None = None, + extensions: Sequence[ClientExtensionFactory] | None = None, + subprotocols: Sequence[Subprotocol] | None = None, + state: State = CONNECTING, + max_size: int | None = 2**20, + logger: LoggerLike | None = None, + ) -> None: + super().__init__( + side=CLIENT, + state=state, + max_size=max_size, + logger=logger, + ) + self.wsuri = wsuri + self.origin = origin + self.available_extensions = extensions + self.available_subprotocols = subprotocols + self.key = generate_key() + + def connect(self) -> Request: + """ + Create a handshake request to open a connection. + + You must send the handshake request with :meth:`send_request`. + + You can modify it before sending it, for example to add HTTP headers. + + Returns: + WebSocket handshake request event to send to the server. 
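+
+        In the Sans-I/O flow, you typically send this request with
+        :meth:`send_request`, then feed the server's reply to
+        :meth:`~websockets.protocol.Protocol.receive_data`, which parses it
+        and runs :meth:`process_response`.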
+ + """ + headers = Headers() + + headers["Host"] = build_host( + self.wsuri.host, self.wsuri.port, self.wsuri.secure + ) + + if self.wsuri.user_info: + headers["Authorization"] = build_authorization_basic(*self.wsuri.user_info) + + if self.origin is not None: + headers["Origin"] = self.origin + + headers["Upgrade"] = "websocket" + headers["Connection"] = "Upgrade" + headers["Sec-WebSocket-Key"] = self.key + headers["Sec-WebSocket-Version"] = "13" + + if self.available_extensions is not None: + extensions_header = build_extension( + [ + (extension_factory.name, extension_factory.get_request_params()) + for extension_factory in self.available_extensions + ] + ) + headers["Sec-WebSocket-Extensions"] = extensions_header + + if self.available_subprotocols is not None: + protocol_header = build_subprotocol(self.available_subprotocols) + headers["Sec-WebSocket-Protocol"] = protocol_header + + return Request(self.wsuri.resource_name, headers) + + def process_response(self, response: Response) -> None: + """ + Check a handshake response. + + Args: + request: WebSocket handshake response received from the server. + + Raises: + InvalidHandshake: If the handshake response is invalid. + + """ + + if response.status_code != 101: + raise InvalidStatus(response) + + headers = response.headers + + connection: list[ConnectionOption] = sum( + [parse_connection(value) for value in headers.get_all("Connection")], [] + ) + + if not any(value.lower() == "upgrade" for value in connection): + raise InvalidUpgrade( + "Connection", ", ".join(connection) if connection else None + ) + + upgrade: list[UpgradeProtocol] = sum( + [parse_upgrade(value) for value in headers.get_all("Upgrade")], [] + ) + + # For compatibility with non-strict implementations, ignore case when + # checking the Upgrade header. It's supposed to be 'WebSocket'. + if not (len(upgrade) == 1 and upgrade[0].lower() == "websocket"): + raise InvalidUpgrade("Upgrade", ", ".join(upgrade) if upgrade else None) + + try: + s_w_accept = headers["Sec-WebSocket-Accept"] + except KeyError: + raise InvalidHeader("Sec-WebSocket-Accept") from None + except MultipleValuesError: + raise InvalidHeader("Sec-WebSocket-Accept", "multiple values") from None + + if s_w_accept != accept_key(self.key): + raise InvalidHeaderValue("Sec-WebSocket-Accept", s_w_accept) + + self.extensions = self.process_extensions(headers) + + self.subprotocol = self.process_subprotocol(headers) + + def process_extensions(self, headers: Headers) -> list[Extension]: + """ + Handle the Sec-WebSocket-Extensions HTTP response header. + + Check that each extension is supported, as well as its parameters. + + :rfc:`6455` leaves the rules up to the specification of each + extension. + + To provide this level of flexibility, for each extension accepted by + the server, we check for a match with each extension available in the + client configuration. If no match is found, an exception is raised. + + If several variants of the same extension are accepted by the server, + it may be configured several times, which won't make sense in general. + Extensions must implement their own requirements. For this purpose, + the list of previously accepted extensions is provided. + + Other requirements, for example related to mandatory extensions or the + order of extensions, may be implemented by overriding this method. + + Args: + headers: WebSocket handshake response headers. + + Returns: + List of accepted extensions. + + Raises: + InvalidHandshake: To abort the handshake. 
+ + """ + accepted_extensions: list[Extension] = [] + + extensions = headers.get_all("Sec-WebSocket-Extensions") + + if extensions: + if self.available_extensions is None: + raise NegotiationError("no extensions supported") + + parsed_extensions: list[ExtensionHeader] = sum( + [parse_extension(header_value) for header_value in extensions], [] + ) + + for name, response_params in parsed_extensions: + for extension_factory in self.available_extensions: + # Skip non-matching extensions based on their name. + if extension_factory.name != name: + continue + + # Skip non-matching extensions based on their params. + try: + extension = extension_factory.process_response_params( + response_params, accepted_extensions + ) + except NegotiationError: + continue + + # Add matching extension to the final list. + accepted_extensions.append(extension) + + # Break out of the loop once we have a match. + break + + # If we didn't break from the loop, no extension in our list + # matched what the server sent. Fail the connection. + else: + raise NegotiationError( + f"Unsupported extension: " + f"name = {name}, params = {response_params}" + ) + + return accepted_extensions + + def process_subprotocol(self, headers: Headers) -> Subprotocol | None: + """ + Handle the Sec-WebSocket-Protocol HTTP response header. + + If provided, check that it contains exactly one supported subprotocol. + + Args: + headers: WebSocket handshake response headers. + + Returns: + Subprotocol, if one was selected. + + """ + subprotocol: Subprotocol | None = None + + subprotocols = headers.get_all("Sec-WebSocket-Protocol") + + if subprotocols: + if self.available_subprotocols is None: + raise NegotiationError("no subprotocols supported") + + parsed_subprotocols: Sequence[Subprotocol] = sum( + [parse_subprotocol(header_value) for header_value in subprotocols], [] + ) + + if len(parsed_subprotocols) > 1: + raise InvalidHeader( + "Sec-WebSocket-Protocol", + f"multiple values: {', '.join(parsed_subprotocols)}", + ) + + subprotocol = parsed_subprotocols[0] + + if subprotocol not in self.available_subprotocols: + raise NegotiationError(f"unsupported subprotocol: {subprotocol}") + + return subprotocol + + def send_request(self, request: Request) -> None: + """ + Send a handshake request to the server. + + Args: + request: WebSocket handshake request event. 
+ + """ + if self.debug: + self.logger.debug("> GET %s HTTP/1.1", request.path) + for key, value in request.headers.raw_items(): + self.logger.debug("> %s: %s", key, value) + + self.writes.append(request.serialize()) + + def parse(self) -> Generator[None, None, None]: + if self.state is CONNECTING: + try: + response = yield from Response.parse( + self.reader.read_line, + self.reader.read_exact, + self.reader.read_to_eof, + ) + except Exception as exc: + self.handshake_exc = exc + self.send_eof() + self.parser = self.discard() + next(self.parser) # start coroutine + yield + + if self.debug: + code, phrase = response.status_code, response.reason_phrase + self.logger.debug("< HTTP/1.1 %d %s", code, phrase) + for key, value in response.headers.raw_items(): + self.logger.debug("< %s: %s", key, value) + if response.body is not None: + self.logger.debug("< [body] (%d bytes)", len(response.body)) + + try: + self.process_response(response) + except InvalidHandshake as exc: + response._exception = exc + self.events.append(response) + self.handshake_exc = exc + self.send_eof() + self.parser = self.discard() + next(self.parser) # start coroutine + yield + + assert self.state is CONNECTING + self.state = OPEN + self.events.append(response) + + yield from super().parse() + + +class ClientConnection(ClientProtocol): + def __init__(self, *args: Any, **kwargs: Any) -> None: + warnings.warn( # deprecated in 11.0 - 2023-04-02 + "ClientConnection was renamed to ClientProtocol", + DeprecationWarning, + ) + super().__init__(*args, **kwargs) + + +BACKOFF_INITIAL_DELAY = float(os.environ.get("WEBSOCKETS_BACKOFF_INITIAL_DELAY", "5")) +BACKOFF_MIN_DELAY = float(os.environ.get("WEBSOCKETS_BACKOFF_MIN_DELAY", "3.1")) +BACKOFF_MAX_DELAY = float(os.environ.get("WEBSOCKETS_BACKOFF_MAX_DELAY", "90.0")) +BACKOFF_FACTOR = float(os.environ.get("WEBSOCKETS_BACKOFF_FACTOR", "1.618")) + + +def backoff( + initial_delay: float = BACKOFF_INITIAL_DELAY, + min_delay: float = BACKOFF_MIN_DELAY, + max_delay: float = BACKOFF_MAX_DELAY, + factor: float = BACKOFF_FACTOR, +) -> Generator[float, None, None]: + """ + Generate a series of backoff delays between reconnection attempts. + + Yields: + How many seconds to wait before retrying to connect. + + """ + # Add a random initial delay between 0 and 5 seconds. + # See 7.2.3. Recovering from Abnormal Closure in RFC 6455. 
+ yield random.random() * initial_delay + delay = min_delay + while delay < max_delay: + yield delay + delay *= factor + while True: + yield max_delay diff --git a/hackaton/lib/python3.12/site-packages/websockets/connection.py b/hackaton/lib/python3.12/site-packages/websockets/connection.py new file mode 100644 index 0000000..5e78e34 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/connection.py @@ -0,0 +1,12 @@ +from __future__ import annotations + +import warnings + +from .protocol import SEND_EOF, Protocol as Connection, Side, State # noqa: F401 + + +warnings.warn( # deprecated in 11.0 - 2023-04-02 + "websockets.connection was renamed to websockets.protocol " + "and Connection was renamed to Protocol", + DeprecationWarning, +) diff --git a/hackaton/lib/python3.12/site-packages/websockets/datastructures.py b/hackaton/lib/python3.12/site-packages/websockets/datastructures.py new file mode 100644 index 0000000..106d6f3 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/datastructures.py @@ -0,0 +1,192 @@ +from __future__ import annotations + +from typing import ( + Any, + Iterable, + Iterator, + Mapping, + MutableMapping, + Protocol, + Tuple, + Union, +) + + +__all__ = ["Headers", "HeadersLike", "MultipleValuesError"] + + +class MultipleValuesError(LookupError): + """ + Exception raised when :class:`Headers` has multiple values for a key. + + """ + + def __str__(self) -> str: + # Implement the same logic as KeyError_str in Objects/exceptions.c. + if len(self.args) == 1: + return repr(self.args[0]) + return super().__str__() + + +class Headers(MutableMapping[str, str]): + """ + Efficient data structure for manipulating HTTP headers. + + A :class:`list` of ``(name, values)`` is inefficient for lookups. + + A :class:`dict` doesn't suffice because header names are case-insensitive + and multiple occurrences of headers with the same name are possible. + + :class:`Headers` stores HTTP headers in a hybrid data structure to provide + efficient insertions and lookups while preserving the original data. + + In order to account for multiple values with minimal hassle, + :class:`Headers` follows this logic: + + - When getting a header with ``headers[name]``: + - if there's no value, :exc:`KeyError` is raised; + - if there's exactly one value, it's returned; + - if there's more than one value, :exc:`MultipleValuesError` is raised. + + - When setting a header with ``headers[name] = value``, the value is + appended to the list of values for that header. + + - When deleting a header with ``del headers[name]``, all values for that + header are removed (this is slow). + + Other methods for manipulating headers are consistent with this logic. + + As long as no header occurs multiple times, :class:`Headers` behaves like + :class:`dict`, except keys are lower-cased to provide case-insensitivity. + + Two methods support manipulating multiple values explicitly: + + - :meth:`get_all` returns a list of all values for a header; + - :meth:`raw_items` returns an iterator of ``(name, values)`` pairs. + + """ + + __slots__ = ["_dict", "_list"] + + # Like dict, Headers accepts an optional "mapping or iterable" argument. 
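+    # For example: Headers({"Host": "example.com"}) or
+    # Headers([("Set-Cookie", "a=1"), ("Set-Cookie", "b=2")]).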
+    def __init__(self, *args: HeadersLike, **kwargs: str) -> None:
+        self._dict: dict[str, list[str]] = {}
+        self._list: list[tuple[str, str]] = []
+        self.update(*args, **kwargs)
+
+    def __str__(self) -> str:
+        return "".join(f"{key}: {value}\r\n" for key, value in self._list) + "\r\n"
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}({self._list!r})"
+
+    def copy(self) -> Headers:
+        copy = self.__class__()
+        copy._dict = self._dict.copy()
+        copy._list = self._list.copy()
+        return copy
+
+    def serialize(self) -> bytes:
+        # Since headers only contain ASCII characters, we can keep this simple.
+        return str(self).encode()
+
+    # Collection methods
+
+    def __contains__(self, key: object) -> bool:
+        return isinstance(key, str) and key.lower() in self._dict
+
+    def __iter__(self) -> Iterator[str]:
+        return iter(self._dict)
+
+    def __len__(self) -> int:
+        return len(self._dict)
+
+    # MutableMapping methods
+
+    def __getitem__(self, key: str) -> str:
+        value = self._dict[key.lower()]
+        if len(value) == 1:
+            return value[0]
+        else:
+            raise MultipleValuesError(key)
+
+    def __setitem__(self, key: str, value: str) -> None:
+        self._dict.setdefault(key.lower(), []).append(value)
+        self._list.append((key, value))
+
+    def __delitem__(self, key: str) -> None:
+        key_lower = key.lower()
+        self._dict.__delitem__(key_lower)
+        # This is inefficient. Fortunately deleting HTTP headers is uncommon.
+        self._list = [(k, v) for k, v in self._list if k.lower() != key_lower]
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Headers):
+            return NotImplemented
+        return self._dict == other._dict
+
+    def clear(self) -> None:
+        """
+        Remove all headers.
+
+        """
+        self._dict = {}
+        self._list = []
+
+    def update(self, *args: HeadersLike, **kwargs: str) -> None:
+        """
+        Update from a :class:`Headers` instance and/or keyword arguments.
+
+        """
+        args = tuple(
+            arg.raw_items() if isinstance(arg, Headers) else arg for arg in args
+        )
+        super().update(*args, **kwargs)
+
+    # Methods for handling multiple values
+
+    def get_all(self, key: str) -> list[str]:
+        """
+        Return the (possibly empty) list of all values for a header.
+
+        Args:
+            key: Header name.
+
+        """
+        return self._dict.get(key.lower(), [])
+
+    def raw_items(self) -> Iterator[tuple[str, str]]:
+        """
+        Return an iterator of all values as ``(name, value)`` pairs.
+
+        """
+        return iter(self._list)
+
+
+# copy of _typeshed.SupportsKeysAndGetItem.
+class SupportsKeysAndGetItem(Protocol):  # pragma: no cover
+    """
+    Dict-like types with ``keys() -> Iterable[str]`` and
+    ``__getitem__(key: str) -> str`` methods.
+
+    """
+
+    def keys(self) -> Iterable[str]: ...
+
+    def __getitem__(self, key: str) -> str: ...
+
+
+# Change to Headers | Mapping[str, str] | ... when dropping Python < 3.10.
+HeadersLike = Union[
+    Headers,
+    Mapping[str, str],
+    # Change to tuple[str, str] when dropping Python < 3.9.
+    Iterable[Tuple[str, str]],
+    SupportsKeysAndGetItem,
+]
+"""
+Types accepted where :class:`Headers` is expected.
+
+In addition to :class:`Headers` itself, this includes dict-like types where both
+keys and values are :class:`str`.
+
+"""
diff --git a/hackaton/lib/python3.12/site-packages/websockets/exceptions.py b/hackaton/lib/python3.12/site-packages/websockets/exceptions.py
new file mode 100644
index 0000000..d723f2f
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/websockets/exceptions.py
@@ -0,0 +1,392 @@
+"""
+:mod:`websockets.exceptions` defines the following hierarchy of exceptions.
+ +* :exc:`WebSocketException` + * :exc:`ConnectionClosed` + * :exc:`ConnectionClosedOK` + * :exc:`ConnectionClosedError` + * :exc:`InvalidURI` + * :exc:`InvalidHandshake` + * :exc:`SecurityError` + * :exc:`InvalidMessage` (legacy) + * :exc:`InvalidStatus` + * :exc:`InvalidStatusCode` (legacy) + * :exc:`InvalidHeader` + * :exc:`InvalidHeaderFormat` + * :exc:`InvalidHeaderValue` + * :exc:`InvalidOrigin` + * :exc:`InvalidUpgrade` + * :exc:`NegotiationError` + * :exc:`DuplicateParameter` + * :exc:`InvalidParameterName` + * :exc:`InvalidParameterValue` + * :exc:`AbortHandshake` (legacy) + * :exc:`RedirectHandshake` (legacy) + * :exc:`ProtocolError` (Sans-I/O) + * :exc:`PayloadTooBig` (Sans-I/O) + * :exc:`InvalidState` (Sans-I/O) + * :exc:`ConcurrencyError` + +""" + +from __future__ import annotations + +import typing +import warnings + +from .imports import lazy_import + + +__all__ = [ + "WebSocketException", + "ConnectionClosed", + "ConnectionClosedOK", + "ConnectionClosedError", + "InvalidURI", + "InvalidHandshake", + "SecurityError", + "InvalidMessage", + "InvalidStatus", + "InvalidStatusCode", + "InvalidHeader", + "InvalidHeaderFormat", + "InvalidHeaderValue", + "InvalidOrigin", + "InvalidUpgrade", + "NegotiationError", + "DuplicateParameter", + "InvalidParameterName", + "InvalidParameterValue", + "AbortHandshake", + "RedirectHandshake", + "ProtocolError", + "WebSocketProtocolError", + "PayloadTooBig", + "InvalidState", + "ConcurrencyError", +] + + +class WebSocketException(Exception): + """ + Base class for all exceptions defined by websockets. + + """ + + +class ConnectionClosed(WebSocketException): + """ + Raised when trying to interact with a closed connection. + + Attributes: + rcvd: If a close frame was received, its code and reason are available + in ``rcvd.code`` and ``rcvd.reason``. + sent: If a close frame was sent, its code and reason are available + in ``sent.code`` and ``sent.reason``. + rcvd_then_sent: If close frames were received and sent, this attribute + tells in which order this happened, from the perspective of this + side of the connection. 
+ + """ + + def __init__( + self, + rcvd: frames.Close | None, + sent: frames.Close | None, + rcvd_then_sent: bool | None = None, + ) -> None: + self.rcvd = rcvd + self.sent = sent + self.rcvd_then_sent = rcvd_then_sent + assert (self.rcvd_then_sent is None) == (self.rcvd is None or self.sent is None) + + def __str__(self) -> str: + if self.rcvd is None: + if self.sent is None: + return "no close frame received or sent" + else: + return f"sent {self.sent}; no close frame received" + else: + if self.sent is None: + return f"received {self.rcvd}; no close frame sent" + else: + if self.rcvd_then_sent: + return f"received {self.rcvd}; then sent {self.sent}" + else: + return f"sent {self.sent}; then received {self.rcvd}" + + # code and reason attributes are provided for backwards-compatibility + + @property + def code(self) -> int: + warnings.warn( # deprecated in 13.1 + "ConnectionClosed.code is deprecated; " + "use Protocol.close_code or ConnectionClosed.rcvd.code", + DeprecationWarning, + ) + if self.rcvd is None: + return frames.CloseCode.ABNORMAL_CLOSURE + return self.rcvd.code + + @property + def reason(self) -> str: + warnings.warn( # deprecated in 13.1 + "ConnectionClosed.reason is deprecated; " + "use Protocol.close_reason or ConnectionClosed.rcvd.reason", + DeprecationWarning, + ) + if self.rcvd is None: + return "" + return self.rcvd.reason + + +class ConnectionClosedOK(ConnectionClosed): + """ + Like :exc:`ConnectionClosed`, when the connection terminated properly. + + A close code with code 1000 (OK) or 1001 (going away) or without a code was + received and sent. + + """ + + +class ConnectionClosedError(ConnectionClosed): + """ + Like :exc:`ConnectionClosed`, when the connection terminated with an error. + + A close frame with a code other than 1000 (OK) or 1001 (going away) was + received or sent, or the closing handshake didn't complete properly. + + """ + + +class InvalidURI(WebSocketException): + """ + Raised when connecting to a URI that isn't a valid WebSocket URI. + + """ + + def __init__(self, uri: str, msg: str) -> None: + self.uri = uri + self.msg = msg + + def __str__(self) -> str: + return f"{self.uri} isn't a valid URI: {self.msg}" + + +class InvalidHandshake(WebSocketException): + """ + Base class for exceptions raised when the opening handshake fails. + + """ + + +class SecurityError(InvalidHandshake): + """ + Raised when a handshake request or response breaks a security rule. + + Security limits can be configured with :doc:`environment variables + <../reference/variables>`. + + """ + + +class InvalidStatus(InvalidHandshake): + """ + Raised when a handshake response rejects the WebSocket upgrade. + + """ + + def __init__(self, response: http11.Response) -> None: + self.response = response + + def __str__(self) -> str: + return ( + "server rejected WebSocket connection: " + f"HTTP {self.response.status_code:d}" + ) + + +class InvalidHeader(InvalidHandshake): + """ + Raised when an HTTP header doesn't have a valid format or value. + + """ + + def __init__(self, name: str, value: str | None = None) -> None: + self.name = name + self.value = value + + def __str__(self) -> str: + if self.value is None: + return f"missing {self.name} header" + elif self.value == "": + return f"empty {self.name} header" + else: + return f"invalid {self.name} header: {self.value}" + + +class InvalidHeaderFormat(InvalidHeader): + """ + Raised when an HTTP header cannot be parsed. + + The format of the header doesn't match the grammar for that header. 
+ + """ + + def __init__(self, name: str, error: str, header: str, pos: int) -> None: + super().__init__(name, f"{error} at {pos} in {header}") + + +class InvalidHeaderValue(InvalidHeader): + """ + Raised when an HTTP header has a wrong value. + + The format of the header is correct but the value isn't acceptable. + + """ + + +class InvalidOrigin(InvalidHeader): + """ + Raised when the Origin header in a request isn't allowed. + + """ + + def __init__(self, origin: str | None) -> None: + super().__init__("Origin", origin) + + +class InvalidUpgrade(InvalidHeader): + """ + Raised when the Upgrade or Connection header isn't correct. + + """ + + +class NegotiationError(InvalidHandshake): + """ + Raised when negotiating an extension or a subprotocol fails. + + """ + + +class DuplicateParameter(NegotiationError): + """ + Raised when a parameter name is repeated in an extension header. + + """ + + def __init__(self, name: str) -> None: + self.name = name + + def __str__(self) -> str: + return f"duplicate parameter: {self.name}" + + +class InvalidParameterName(NegotiationError): + """ + Raised when a parameter name in an extension header is invalid. + + """ + + def __init__(self, name: str) -> None: + self.name = name + + def __str__(self) -> str: + return f"invalid parameter name: {self.name}" + + +class InvalidParameterValue(NegotiationError): + """ + Raised when a parameter value in an extension header is invalid. + + """ + + def __init__(self, name: str, value: str | None) -> None: + self.name = name + self.value = value + + def __str__(self) -> str: + if self.value is None: + return f"missing value for parameter {self.name}" + elif self.value == "": + return f"empty value for parameter {self.name}" + else: + return f"invalid value for parameter {self.name}: {self.value}" + + +class ProtocolError(WebSocketException): + """ + Raised when receiving or sending a frame that breaks the protocol. + + The Sans-I/O implementation raises this exception when: + + * receiving or sending a frame that contains invalid data; + * receiving or sending an invalid sequence of frames. + + """ + + +class PayloadTooBig(WebSocketException): + """ + Raised when parsing a frame with a payload that exceeds the maximum size. + + The Sans-I/O layer uses this exception internally. It doesn't bubble up to + the I/O layer. + + The :meth:`~websockets.extensions.Extension.decode` method of extensions + must raise :exc:`PayloadTooBig` if decoding a frame would exceed the limit. + + """ + + +class InvalidState(WebSocketException, AssertionError): + """ + Raised when sending a frame is forbidden in the current state. + + Specifically, the Sans-I/O layer raises this exception when: + + * sending a data frame to a connection in a state other + :attr:`~websockets.protocol.State.OPEN`; + * sending a control frame to a connection in a state other than + :attr:`~websockets.protocol.State.OPEN` or + :attr:`~websockets.protocol.State.CLOSING`. + + """ + + +class ConcurrencyError(WebSocketException, RuntimeError): + """ + Raised when receiving or sending messages concurrently. + + WebSocket is a connection-oriented protocol. Reads must be serialized; so + must be writes. However, reading and writing concurrently is possible. + + """ + + +# When type checking, import non-deprecated aliases eagerly. Else, import on demand. 
+if typing.TYPE_CHECKING: + from .legacy.exceptions import ( + AbortHandshake, + InvalidMessage, + InvalidStatusCode, + RedirectHandshake, + ) + + WebSocketProtocolError = ProtocolError +else: + lazy_import( + globals(), + aliases={ + "AbortHandshake": ".legacy.exceptions", + "InvalidMessage": ".legacy.exceptions", + "InvalidStatusCode": ".legacy.exceptions", + "RedirectHandshake": ".legacy.exceptions", + "WebSocketProtocolError": ".legacy.exceptions", + }, + ) + +# At the bottom to break import cycles created by type annotations. +from . import frames, http11 # noqa: E402 diff --git a/hackaton/lib/python3.12/site-packages/websockets/extensions/__init__.py b/hackaton/lib/python3.12/site-packages/websockets/extensions/__init__.py new file mode 100644 index 0000000..02838b9 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/extensions/__init__.py @@ -0,0 +1,4 @@ +from .base import * + + +__all__ = ["Extension", "ClientExtensionFactory", "ServerExtensionFactory"] diff --git a/hackaton/lib/python3.12/site-packages/websockets/extensions/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/extensions/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..944e01f Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/extensions/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/extensions/__pycache__/base.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/extensions/__pycache__/base.cpython-312.pyc new file mode 100644 index 0000000..a3c2dc3 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/extensions/__pycache__/base.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/extensions/__pycache__/permessage_deflate.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/extensions/__pycache__/permessage_deflate.cpython-312.pyc new file mode 100644 index 0000000..fedf316 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/extensions/__pycache__/permessage_deflate.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/extensions/base.py b/hackaton/lib/python3.12/site-packages/websockets/extensions/base.py new file mode 100644 index 0000000..75bae6b --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/extensions/base.py @@ -0,0 +1,123 @@ +from __future__ import annotations + +from typing import Sequence + +from ..frames import Frame +from ..typing import ExtensionName, ExtensionParameter + + +__all__ = ["Extension", "ClientExtensionFactory", "ServerExtensionFactory"] + + +class Extension: + """ + Base class for extensions. + + """ + + name: ExtensionName + """Extension identifier.""" + + def decode(self, frame: Frame, *, max_size: int | None = None) -> Frame: + """ + Decode an incoming frame. + + Args: + frame: Incoming frame. + max_size: Maximum payload size in bytes. + + Returns: + Decoded frame. + + Raises: + PayloadTooBig: If decoding the payload exceeds ``max_size``. + + """ + raise NotImplementedError + + def encode(self, frame: Frame) -> Frame: + """ + Encode an outgoing frame. + + Args: + frame: Outgoing frame. + + Returns: + Encoded frame. + + """ + raise NotImplementedError + + +class ClientExtensionFactory: + """ + Base class for client-side extension factories. 
+ + """ + + name: ExtensionName + """Extension identifier.""" + + def get_request_params(self) -> list[ExtensionParameter]: + """ + Build parameters to send to the server for this extension. + + Returns: + Parameters to send to the server. + + """ + raise NotImplementedError + + def process_response_params( + self, + params: Sequence[ExtensionParameter], + accepted_extensions: Sequence[Extension], + ) -> Extension: + """ + Process parameters received from the server. + + Args: + params: Parameters received from the server for this extension. + accepted_extensions: List of previously accepted extensions. + + Returns: + An extension instance. + + Raises: + NegotiationError: If parameters aren't acceptable. + + """ + raise NotImplementedError + + +class ServerExtensionFactory: + """ + Base class for server-side extension factories. + + """ + + name: ExtensionName + """Extension identifier.""" + + def process_request_params( + self, + params: Sequence[ExtensionParameter], + accepted_extensions: Sequence[Extension], + ) -> tuple[list[ExtensionParameter], Extension]: + """ + Process parameters received from the client. + + Args: + params: Parameters received from the client for this extension. + accepted_extensions: List of previously accepted extensions. + + Returns: + To accept the offer, parameters to send to the client for this + extension and an extension instance. + + Raises: + NegotiationError: To reject the offer, if parameters received from + the client aren't acceptable. + + """ + raise NotImplementedError diff --git a/hackaton/lib/python3.12/site-packages/websockets/extensions/permessage_deflate.py b/hackaton/lib/python3.12/site-packages/websockets/extensions/permessage_deflate.py new file mode 100644 index 0000000..25d2c1c --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/extensions/permessage_deflate.py @@ -0,0 +1,670 @@ +from __future__ import annotations + +import dataclasses +import zlib +from typing import Any, Sequence + +from .. import frames +from ..exceptions import ( + DuplicateParameter, + InvalidParameterName, + InvalidParameterValue, + NegotiationError, + PayloadTooBig, + ProtocolError, +) +from ..typing import ExtensionName, ExtensionParameter +from .base import ClientExtensionFactory, Extension, ServerExtensionFactory + + +__all__ = [ + "PerMessageDeflate", + "ClientPerMessageDeflateFactory", + "enable_client_permessage_deflate", + "ServerPerMessageDeflateFactory", + "enable_server_permessage_deflate", +] + +_EMPTY_UNCOMPRESSED_BLOCK = b"\x00\x00\xff\xff" + +_MAX_WINDOW_BITS_VALUES = [str(bits) for bits in range(8, 16)] + + +class PerMessageDeflate(Extension): + """ + Per-Message Deflate extension. + + """ + + name = ExtensionName("permessage-deflate") + + def __init__( + self, + remote_no_context_takeover: bool, + local_no_context_takeover: bool, + remote_max_window_bits: int, + local_max_window_bits: int, + compress_settings: dict[Any, Any] | None = None, + ) -> None: + """ + Configure the Per-Message Deflate extension. 
+ + """ + if compress_settings is None: + compress_settings = {} + + assert remote_no_context_takeover in [False, True] + assert local_no_context_takeover in [False, True] + assert 8 <= remote_max_window_bits <= 15 + assert 8 <= local_max_window_bits <= 15 + assert "wbits" not in compress_settings + + self.remote_no_context_takeover = remote_no_context_takeover + self.local_no_context_takeover = local_no_context_takeover + self.remote_max_window_bits = remote_max_window_bits + self.local_max_window_bits = local_max_window_bits + self.compress_settings = compress_settings + + if not self.remote_no_context_takeover: + self.decoder = zlib.decompressobj(wbits=-self.remote_max_window_bits) + + if not self.local_no_context_takeover: + self.encoder = zlib.compressobj( + wbits=-self.local_max_window_bits, + **self.compress_settings, + ) + + # To handle continuation frames properly, we must keep track of + # whether that initial frame was encoded. + self.decode_cont_data = False + # There's no need for self.encode_cont_data because we always encode + # outgoing frames, so it would always be True. + + def __repr__(self) -> str: + return ( + f"PerMessageDeflate(" + f"remote_no_context_takeover={self.remote_no_context_takeover}, " + f"local_no_context_takeover={self.local_no_context_takeover}, " + f"remote_max_window_bits={self.remote_max_window_bits}, " + f"local_max_window_bits={self.local_max_window_bits})" + ) + + def decode( + self, + frame: frames.Frame, + *, + max_size: int | None = None, + ) -> frames.Frame: + """ + Decode an incoming frame. + + """ + # Skip control frames. + if frame.opcode in frames.CTRL_OPCODES: + return frame + + # Handle continuation data frames: + # - skip if the message isn't encoded + # - reset "decode continuation data" flag if it's a final frame + if frame.opcode is frames.OP_CONT: + if not self.decode_cont_data: + return frame + if frame.fin: + self.decode_cont_data = False + + # Handle text and binary data frames: + # - skip if the message isn't encoded + # - unset the rsv1 flag on the first frame of a compressed message + # - set "decode continuation data" flag if it's a non-final frame + else: + if not frame.rsv1: + return frame + frame = dataclasses.replace(frame, rsv1=False) + if not frame.fin: + self.decode_cont_data = True + + # Re-initialize per-message decoder. + if self.remote_no_context_takeover: + self.decoder = zlib.decompressobj(wbits=-self.remote_max_window_bits) + + # Uncompress data. Protect against zip bombs by preventing zlib from + # decompressing more than max_length bytes (except when the limit is + # disabled with max_size = None). + data = frame.data + if frame.fin: + data += _EMPTY_UNCOMPRESSED_BLOCK + max_length = 0 if max_size is None else max_size + try: + data = self.decoder.decompress(data, max_length) + except zlib.error as exc: + raise ProtocolError("decompression failed") from exc + if self.decoder.unconsumed_tail: + raise PayloadTooBig(f"over size limit (? > {max_size} bytes)") + + # Allow garbage collection of the decoder if it won't be reused. + if frame.fin and self.remote_no_context_takeover: + del self.decoder + + return dataclasses.replace(frame, data=data) + + def encode(self, frame: frames.Frame) -> frames.Frame: + """ + Encode an outgoing frame. + + """ + # Skip control frames. + if frame.opcode in frames.CTRL_OPCODES: + return frame + + # Since we always encode messages, there's no "encode continuation + # data" flag similar to "decode continuation data" at this time. 
+        if frame.opcode is not frames.OP_CONT:
+            # Set the rsv1 flag on the first frame of a compressed message.
+            frame = dataclasses.replace(frame, rsv1=True)
+            # Re-initialize per-message encoder.
+            if self.local_no_context_takeover:
+                self.encoder = zlib.compressobj(
+                    wbits=-self.local_max_window_bits,
+                    **self.compress_settings,
+                )
+
+        # Compress data.
+        data = self.encoder.compress(frame.data) + self.encoder.flush(zlib.Z_SYNC_FLUSH)
+        if frame.fin and data.endswith(_EMPTY_UNCOMPRESSED_BLOCK):
+            data = data[:-4]
+
+        # Allow garbage collection of the encoder if it won't be reused.
+        if frame.fin and self.local_no_context_takeover:
+            del self.encoder
+
+        return dataclasses.replace(frame, data=data)
+
+
+def _build_parameters(
+    server_no_context_takeover: bool,
+    client_no_context_takeover: bool,
+    server_max_window_bits: int | None,
+    client_max_window_bits: int | bool | None,
+) -> list[ExtensionParameter]:
+    """
+    Build a list of ``(name, value)`` pairs for some compression parameters.
+
+    """
+    params: list[ExtensionParameter] = []
+    if server_no_context_takeover:
+        params.append(("server_no_context_takeover", None))
+    if client_no_context_takeover:
+        params.append(("client_no_context_takeover", None))
+    if server_max_window_bits:
+        params.append(("server_max_window_bits", str(server_max_window_bits)))
+    if client_max_window_bits is True:  # only in handshake requests
+        params.append(("client_max_window_bits", None))
+    elif client_max_window_bits:
+        params.append(("client_max_window_bits", str(client_max_window_bits)))
+    return params
+
+
+def _extract_parameters(
+    params: Sequence[ExtensionParameter], *, is_server: bool
+) -> tuple[bool, bool, int | None, int | bool | None]:
+    """
+    Extract compression parameters from a list of ``(name, value)`` pairs.
+
+    If ``is_server`` is :obj:`True`, ``client_max_window_bits`` may be
+    provided without a value. This is only allowed in handshake requests.
+
+    """
+    server_no_context_takeover: bool = False
+    client_no_context_takeover: bool = False
+    server_max_window_bits: int | None = None
+    client_max_window_bits: int | bool | None = None
+
+    for name, value in params:
+        if name == "server_no_context_takeover":
+            if server_no_context_takeover:
+                raise DuplicateParameter(name)
+            if value is None:
+                server_no_context_takeover = True
+            else:
+                raise InvalidParameterValue(name, value)
+
+        elif name == "client_no_context_takeover":
+            if client_no_context_takeover:
+                raise DuplicateParameter(name)
+            if value is None:
+                client_no_context_takeover = True
+            else:
+                raise InvalidParameterValue(name, value)
+
+        elif name == "server_max_window_bits":
+            if server_max_window_bits is not None:
+                raise DuplicateParameter(name)
+            if value in _MAX_WINDOW_BITS_VALUES:
+                server_max_window_bits = int(value)
+            else:
+                raise InvalidParameterValue(name, value)
+
+        elif name == "client_max_window_bits":
+            if client_max_window_bits is not None:
+                raise DuplicateParameter(name)
+            if is_server and value is None:  # only in handshake requests
+                client_max_window_bits = True
+            elif value in _MAX_WINDOW_BITS_VALUES:
+                client_max_window_bits = int(value)
+            else:
+                raise InvalidParameterValue(name, value)
+
+        else:
+            raise InvalidParameterName(name)
+
+    return (
+        server_no_context_takeover,
+        client_no_context_takeover,
+        server_max_window_bits,
+        client_max_window_bits,
+    )
+
+
+class ClientPerMessageDeflateFactory(ClientExtensionFactory):
+    """
+    Client-side extension factory for the Per-Message Deflate extension.
+
+    Parameters behave as described in `section 7.1 of RFC 7692`_.
+
+    .. _section 7.1 of RFC 7692: https://datatracker.ietf.org/doc/html/rfc7692#section-7.1
+
+    Set them to :obj:`True` to include them in the negotiation offer without a
+    value or to an integer value to include them with this value.
+
+    Args:
+        server_no_context_takeover: Prevent server from using context takeover.
+        client_no_context_takeover: Prevent client from using context takeover.
+        server_max_window_bits: Maximum size of the server's LZ77 sliding window
+            in bits, between 8 and 15.
+        client_max_window_bits: Maximum size of the client's LZ77 sliding window
+            in bits, between 8 and 15, or :obj:`True` to indicate support without
+            setting a limit.
+        compress_settings: Additional keyword arguments for :func:`zlib.compressobj`,
+            excluding ``wbits``.
+
+    """
+
+    name = ExtensionName("permessage-deflate")
+
+    def __init__(
+        self,
+        server_no_context_takeover: bool = False,
+        client_no_context_takeover: bool = False,
+        server_max_window_bits: int | None = None,
+        client_max_window_bits: int | bool | None = True,
+        compress_settings: dict[str, Any] | None = None,
+    ) -> None:
+        """
+        Configure the Per-Message Deflate extension factory.
+
+        """
+        if not (server_max_window_bits is None or 8 <= server_max_window_bits <= 15):
+            raise ValueError("server_max_window_bits must be between 8 and 15")
+        if not (
+            client_max_window_bits is None
+            or client_max_window_bits is True
+            or 8 <= client_max_window_bits <= 15
+        ):
+            raise ValueError("client_max_window_bits must be between 8 and 15")
+        if compress_settings is not None and "wbits" in compress_settings:
+            raise ValueError(
+                "compress_settings must not include wbits, "
+                "set client_max_window_bits instead"
+            )
+
+        self.server_no_context_takeover = server_no_context_takeover
+        self.client_no_context_takeover = client_no_context_takeover
+        self.server_max_window_bits = server_max_window_bits
+        self.client_max_window_bits = client_max_window_bits
+        self.compress_settings = compress_settings
+
+    def get_request_params(self) -> list[ExtensionParameter]:
+        """
+        Build request parameters.
+
+        """
+        return _build_parameters(
+            self.server_no_context_takeover,
+            self.client_no_context_takeover,
+            self.server_max_window_bits,
+            self.client_max_window_bits,
+        )
+
+    def process_response_params(
+        self,
+        params: Sequence[ExtensionParameter],
+        accepted_extensions: Sequence[Extension],
+    ) -> PerMessageDeflate:
+        """
+        Process response parameters.
+
+        Return an extension instance.
+
+        """
+        if any(other.name == self.name for other in accepted_extensions):
+            raise NegotiationError(f"received duplicate {self.name}")
+
+        # Request parameters are available in instance variables.
+
+        # Load response parameters in local variables.
+        (
+            server_no_context_takeover,
+            client_no_context_takeover,
+            server_max_window_bits,
+            client_max_window_bits,
+        ) = _extract_parameters(params, is_server=False)
+
+        # After comparing the request and the response, the final
+        # configuration must be available in the local variables.
+
+        # server_no_context_takeover
+        #
+        #   Req.    Resp.   Result
+        #   ------  ------  --------------------------------------------------
+        #   False   False   False
+        #   False   True    True
+        #   True    False   Error!
+        #   True    True    True
+
+        if self.server_no_context_takeover:
+            if not server_no_context_takeover:
+                raise NegotiationError("expected server_no_context_takeover")
+
+        # client_no_context_takeover
+        #
+        #   Req.    Resp.   Result
+        #   ------  ------  --------------------------------------------------
+        #   False   False   False
+        #   False   True    True
+        #   True    False   True - must change value
+        #   True    True    True
+
+        if self.client_no_context_takeover:
+            if not client_no_context_takeover:
+                client_no_context_takeover = True
+
+        # server_max_window_bits
+
+        #   Req.    Resp.   Result
+        #   ------  ------  --------------------------------------------------
+        #   None    None    None
+        #   None    8≤M≤15  M
+        #   8≤N≤15  None    Error!
+        #   8≤N≤15  8≤M≤N   M
+        #   8≤N≤15  N<M≤15  Error!
+
+        if self.server_max_window_bits is None:
+            pass
+
+        else:
+            if server_max_window_bits is None:
+                raise NegotiationError("expected server_max_window_bits")
+            elif server_max_window_bits > self.server_max_window_bits:
+                raise NegotiationError("unsupported server_max_window_bits")
+
+        # client_max_window_bits
+
+        #   Req.    Resp.   Result
+        #   ------  ------  --------------------------------------------------
+        #   None    None    None
+        #   None    8≤M≤15  Error!
+        #   True    None    None
+        #   True    8≤M≤15  M
+        #   8≤N≤15  None    N - must change value
+        #   8≤N≤15  8≤M≤N   M
+        #   8≤N≤15  N<M≤15  Error!
+
+        if self.client_max_window_bits is None:
+            if client_max_window_bits is not None:
+                raise NegotiationError("unexpected client_max_window_bits")
+
+        elif self.client_max_window_bits is True:
+            pass
+
+        else:
+            if client_max_window_bits is None:
+                client_max_window_bits = self.client_max_window_bits
+            elif client_max_window_bits > self.client_max_window_bits:
+                raise NegotiationError("unsupported client_max_window_bits")
+
+        return PerMessageDeflate(
+            server_no_context_takeover,  # remote_no_context_takeover
+            client_no_context_takeover,  # local_no_context_takeover
+            server_max_window_bits or 15,  # remote_max_window_bits
+            client_max_window_bits or 15,  # local_max_window_bits
+            self.compress_settings,
+        )
+
+
+def enable_client_permessage_deflate(
+    extensions: Sequence[ClientExtensionFactory] | None,
+) -> Sequence[ClientExtensionFactory]:
+    """
+    Enable Per-Message Deflate with default settings in client extensions.
+
+    If the extension is already present, perhaps with non-default settings,
+    the configuration isn't changed.
+
+    """
+    if extensions is None:
+        extensions = []
+    if not any(
+        extension_factory.name == ClientPerMessageDeflateFactory.name
+        for extension_factory in extensions
+    ):
+        extensions = list(extensions) + [
+            ClientPerMessageDeflateFactory(
+                compress_settings={"memLevel": 5},
+            )
+        ]
+    return extensions
+
+
+class ServerPerMessageDeflateFactory(ServerExtensionFactory):
+    """
+    Server-side extension factory for the Per-Message Deflate extension.
+
+    Parameters behave as described in `section 7.1 of RFC 7692`_.
+
+    .. _section 7.1 of RFC 7692: https://datatracker.ietf.org/doc/html/rfc7692#section-7.1
+
+    Set them to :obj:`True` to include them in the negotiation offer without a
+    value or to an integer value to include them with this value.
+
+    Args:
+        server_no_context_takeover: Prevent server from using context takeover.
+        client_no_context_takeover: Prevent client from using context takeover.
+        server_max_window_bits: Maximum size of the server's LZ77 sliding window
+            in bits, between 8 and 15.
+        client_max_window_bits: Maximum size of the client's LZ77 sliding window
+            in bits, between 8 and 15.
+        compress_settings: Additional keyword arguments for :func:`zlib.compressobj`,
+            excluding ``wbits``.
+        require_client_max_window_bits: Do not enable compression at all if
+            client doesn't advertise support for ``client_max_window_bits``;
+            the default behavior is to enable compression without enforcing
+            ``client_max_window_bits``.
+
+    """
+
+    name = ExtensionName("permessage-deflate")
+
+    def __init__(
+        self,
+        server_no_context_takeover: bool = False,
+        client_no_context_takeover: bool = False,
+        server_max_window_bits: int | None = None,
+        client_max_window_bits: int | None = None,
+        compress_settings: dict[str, Any] | None = None,
+        require_client_max_window_bits: bool = False,
+    ) -> None:
+        """
+        Configure the Per-Message Deflate extension factory.
+
+        """
+        if not (server_max_window_bits is None or 8 <= server_max_window_bits <= 15):
+            raise ValueError("server_max_window_bits must be between 8 and 15")
+        if not (client_max_window_bits is None or 8 <= client_max_window_bits <= 15):
+            raise ValueError("client_max_window_bits must be between 8 and 15")
+        if compress_settings is not None and "wbits" in compress_settings:
+            raise ValueError(
+                "compress_settings must not include wbits, "
+                "set server_max_window_bits instead"
+            )
+        if client_max_window_bits is None and require_client_max_window_bits:
+            raise ValueError(
+                "require_client_max_window_bits is enabled, "
+                "but client_max_window_bits isn't configured"
+            )
+
+        self.server_no_context_takeover = server_no_context_takeover
+        self.client_no_context_takeover = client_no_context_takeover
+        self.server_max_window_bits = server_max_window_bits
+        self.client_max_window_bits = client_max_window_bits
+        self.compress_settings = compress_settings
+        self.require_client_max_window_bits = require_client_max_window_bits
+
+    def process_request_params(
+        self,
+        params: Sequence[ExtensionParameter],
+        accepted_extensions: Sequence[Extension],
+    ) -> tuple[list[ExtensionParameter], PerMessageDeflate]:
+        """
+        Process request parameters.
+
+        Return response params and an extension instance.
+
+        """
+        if any(other.name == self.name for other in accepted_extensions):
+            raise NegotiationError(f"skipped duplicate {self.name}")
+
+        # Load request parameters in local variables.
+        (
+            server_no_context_takeover,
+            client_no_context_takeover,
+            server_max_window_bits,
+            client_max_window_bits,
+        ) = _extract_parameters(params, is_server=True)
+
+        # Configuration parameters are available in instance variables.
+
+        # After comparing the request and the configuration, the response must
+        # be available in the local variables.
+
+        # server_no_context_takeover
+        #
+        #   Config  Req.    Resp.
+        #   ------  ------  --------------------------------------------------
+        #   False   False   False
+        #   False   True    True
+        #   True    False   True - must change value to True
+        #   True    True    True
+
+        if self.server_no_context_takeover:
+            if not server_no_context_takeover:
+                server_no_context_takeover = True
+
+        # client_no_context_takeover
+        #
+        #   Config  Req.    Resp.
+        #   ------  ------  --------------------------------------------------
+        #   False   False   False
+        #   False   True    True (or False)
+        #   True    False   True - must change value to True
+        #   True    True    True (or False)
+
+        if self.client_no_context_takeover:
+            if not client_no_context_takeover:
+                client_no_context_takeover = True
+
+        # server_max_window_bits
+
+        #   Config  Req.    Resp.
+        #   ------  ------  --------------------------------------------------
+        #   None    None    None
+        #   None    8≤M≤15  M
+        #   8≤N≤15  None    N - must change value
+        #   8≤N≤15  8≤M≤N   M
+        #   8≤N≤15  N<M≤15  N - must change value
+
+        if self.server_max_window_bits is None:
+            pass
+
+        else:
+            if server_max_window_bits is None:
+                server_max_window_bits = self.server_max_window_bits
+            elif server_max_window_bits > self.server_max_window_bits:
+                server_max_window_bits = self.server_max_window_bits
+
+        # client_max_window_bits
+
+        #   Config  Req.    Resp.
+        #   ------  ------  --------------------------------------------------
+        #   None    None    None
+        #   None    True    None - must change value
+        #   None    8≤M≤15  M (or None)
+        #   8≤N≤15  None    None or Error!
+        #   8≤N≤15  True    N - must change value
+        #   8≤N≤15  8≤M≤N   M (or None)
+        #   8≤N≤15  N<M≤15  N - must change value
+
+        if self.client_max_window_bits is None:
+            if client_max_window_bits is True:
+                client_max_window_bits = self.client_max_window_bits
+
+        else:
+            if client_max_window_bits is None:
+                if self.require_client_max_window_bits:
+                    raise NegotiationError("required client_max_window_bits")
+            elif client_max_window_bits is True:
+                client_max_window_bits = self.client_max_window_bits
+            elif self.client_max_window_bits < client_max_window_bits:
+                client_max_window_bits = self.client_max_window_bits
+
+        return (
+            _build_parameters(
+                server_no_context_takeover,
+                client_no_context_takeover,
+                server_max_window_bits,
+                client_max_window_bits,
+            ),
+            PerMessageDeflate(
+                client_no_context_takeover,  # remote_no_context_takeover
+                server_no_context_takeover,  # local_no_context_takeover
+                client_max_window_bits or 15,  # remote_max_window_bits
+                server_max_window_bits or 15,  # local_max_window_bits
+                self.compress_settings,
+            ),
+        )
+
+
+def enable_server_permessage_deflate(
+    extensions: Sequence[ServerExtensionFactory] | None,
+) -> Sequence[ServerExtensionFactory]:
+    """
+    Enable Per-Message Deflate with default settings in server extensions.
+
+    If the extension is already present, perhaps with non-default settings,
+    the configuration isn't changed.
+ + """ + if extensions is None: + extensions = [] + if not any( + ext_factory.name == ServerPerMessageDeflateFactory.name + for ext_factory in extensions + ): + extensions = list(extensions) + [ + ServerPerMessageDeflateFactory( + server_max_window_bits=12, + client_max_window_bits=12, + compress_settings={"memLevel": 5}, + ) + ] + return extensions diff --git a/hackaton/lib/python3.12/site-packages/websockets/frames.py b/hackaton/lib/python3.12/site-packages/websockets/frames.py new file mode 100644 index 0000000..a63bdc3 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/frames.py @@ -0,0 +1,429 @@ +from __future__ import annotations + +import dataclasses +import enum +import io +import os +import secrets +import struct +from typing import Callable, Generator, Sequence + +from .exceptions import PayloadTooBig, ProtocolError + + +try: + from .speedups import apply_mask +except ImportError: + from .utils import apply_mask + + +__all__ = [ + "Opcode", + "OP_CONT", + "OP_TEXT", + "OP_BINARY", + "OP_CLOSE", + "OP_PING", + "OP_PONG", + "DATA_OPCODES", + "CTRL_OPCODES", + "Frame", + "Close", +] + + +class Opcode(enum.IntEnum): + """Opcode values for WebSocket frames.""" + + CONT, TEXT, BINARY = 0x00, 0x01, 0x02 + CLOSE, PING, PONG = 0x08, 0x09, 0x0A + + +OP_CONT = Opcode.CONT +OP_TEXT = Opcode.TEXT +OP_BINARY = Opcode.BINARY +OP_CLOSE = Opcode.CLOSE +OP_PING = Opcode.PING +OP_PONG = Opcode.PONG + +DATA_OPCODES = OP_CONT, OP_TEXT, OP_BINARY +CTRL_OPCODES = OP_CLOSE, OP_PING, OP_PONG + + +class CloseCode(enum.IntEnum): + """Close code values for WebSocket close frames.""" + + NORMAL_CLOSURE = 1000 + GOING_AWAY = 1001 + PROTOCOL_ERROR = 1002 + UNSUPPORTED_DATA = 1003 + # 1004 is reserved + NO_STATUS_RCVD = 1005 + ABNORMAL_CLOSURE = 1006 + INVALID_DATA = 1007 + POLICY_VIOLATION = 1008 + MESSAGE_TOO_BIG = 1009 + MANDATORY_EXTENSION = 1010 + INTERNAL_ERROR = 1011 + SERVICE_RESTART = 1012 + TRY_AGAIN_LATER = 1013 + BAD_GATEWAY = 1014 + TLS_HANDSHAKE = 1015 + + +# See https://www.iana.org/assignments/websocket/websocket.xhtml +CLOSE_CODE_EXPLANATIONS: dict[int, str] = { + CloseCode.NORMAL_CLOSURE: "OK", + CloseCode.GOING_AWAY: "going away", + CloseCode.PROTOCOL_ERROR: "protocol error", + CloseCode.UNSUPPORTED_DATA: "unsupported data", + CloseCode.NO_STATUS_RCVD: "no status received [internal]", + CloseCode.ABNORMAL_CLOSURE: "abnormal closure [internal]", + CloseCode.INVALID_DATA: "invalid frame payload data", + CloseCode.POLICY_VIOLATION: "policy violation", + CloseCode.MESSAGE_TOO_BIG: "message too big", + CloseCode.MANDATORY_EXTENSION: "mandatory extension", + CloseCode.INTERNAL_ERROR: "internal error", + CloseCode.SERVICE_RESTART: "service restart", + CloseCode.TRY_AGAIN_LATER: "try again later", + CloseCode.BAD_GATEWAY: "bad gateway", + CloseCode.TLS_HANDSHAKE: "TLS handshake failure [internal]", +} + + +# Close codes that are allowed in a close frame. +# Using a set optimizes `code in EXTERNAL_CLOSE_CODES`.
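For orientation, a small sketch, not part of the diff, showing how the registry above maps a close code to its explanation:

# Sketch: looking up the human-readable explanation of a close code.
# CLOSE_CODE_EXPLANATIONS is module-level but not exported in __all__.
from websockets.frames import CLOSE_CODE_EXPLANATIONS, CloseCode

code = CloseCode.GOING_AWAY
print(f"{code.value}: {CLOSE_CODE_EXPLANATIONS[code]}")  # 1001: going away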
+EXTERNAL_CLOSE_CODES = { + CloseCode.NORMAL_CLOSURE, + CloseCode.GOING_AWAY, + CloseCode.PROTOCOL_ERROR, + CloseCode.UNSUPPORTED_DATA, + CloseCode.INVALID_DATA, + CloseCode.POLICY_VIOLATION, + CloseCode.MESSAGE_TOO_BIG, + CloseCode.MANDATORY_EXTENSION, + CloseCode.INTERNAL_ERROR, + CloseCode.SERVICE_RESTART, + CloseCode.TRY_AGAIN_LATER, + CloseCode.BAD_GATEWAY, +} + + +OK_CLOSE_CODES = { + CloseCode.NORMAL_CLOSURE, + CloseCode.GOING_AWAY, + CloseCode.NO_STATUS_RCVD, +} + + +BytesLike = bytes, bytearray, memoryview + + +@dataclasses.dataclass +class Frame: + """ + WebSocket frame. + + Attributes: + opcode: Opcode. + data: Payload data. + fin: FIN bit. + rsv1: RSV1 bit. + rsv2: RSV2 bit. + rsv3: RSV3 bit. + + Only these fields are needed. The MASK bit, payload length and masking-key + are handled on the fly when parsing and serializing frames. + + """ + + opcode: Opcode + data: bytes + fin: bool = True + rsv1: bool = False + rsv2: bool = False + rsv3: bool = False + + # Configure if you want to see more in logs. Should be a multiple of 3. + MAX_LOG_SIZE = int(os.environ.get("WEBSOCKETS_MAX_LOG_SIZE", "75")) + + def __str__(self) -> str: + """ + Return a human-readable representation of a frame. + + """ + coding = None + length = f"{len(self.data)} byte{'' if len(self.data) == 1 else 's'}" + non_final = "" if self.fin else "continued" + + if self.opcode is OP_TEXT: + # Decoding only the beginning and the end is needlessly hard. + # Decode the entire payload then elide later if necessary. + data = repr(self.data.decode()) + elif self.opcode is OP_BINARY: + # We'll show at most the first 16 bytes and the last 8 bytes. + # Encode just what we need, plus two dummy bytes to elide later. + binary = self.data + if len(binary) > self.MAX_LOG_SIZE // 3: + cut = (self.MAX_LOG_SIZE // 3 - 1) // 3 # by default cut = 8 + binary = b"".join([binary[: 2 * cut], b"\x00\x00", binary[-cut:]]) + data = " ".join(f"{byte:02x}" for byte in binary) + elif self.opcode is OP_CLOSE: + data = str(Close.parse(self.data)) + elif self.data: + # We don't know if a Continuation frame contains text or binary. + # Ping and Pong frames could contain UTF-8. + # Attempt to decode as UTF-8 and display it as text; fallback to + # binary. If self.data is a memoryview, it has no decode() method, + # which raises AttributeError. + try: + data = repr(self.data.decode()) + coding = "text" + except (UnicodeDecodeError, AttributeError): + binary = self.data + if len(binary) > self.MAX_LOG_SIZE // 3: + cut = (self.MAX_LOG_SIZE // 3 - 1) // 3 # by default cut = 8 + binary = b"".join([binary[: 2 * cut], b"\x00\x00", binary[-cut:]]) + data = " ".join(f"{byte:02x}" for byte in binary) + coding = "binary" + else: + data = "''" + + if len(data) > self.MAX_LOG_SIZE: + cut = self.MAX_LOG_SIZE // 3 - 1 # by default cut = 24 + data = data[: 2 * cut] + "..." + data[-cut:] + + metadata = ", ".join(filter(None, [coding, length, non_final])) + + return f"{self.opcode.name} {data} [{metadata}]" + + @classmethod + def parse( + cls, + read_exact: Callable[[int], Generator[None, None, bytes]], + *, + mask: bool, + max_size: int | None = None, + extensions: Sequence[extensions.Extension] | None = None, + ) -> Generator[None, None, Frame]: + """ + Parse a WebSocket frame. + + This is a generator-based coroutine. + + Args: + read_exact: Generator-based coroutine that reads the requested + bytes or raises an exception if there isn't enough data. + mask: Whether the frame should be masked i.e. whether the read + happens on the server side. 
+ max_size: Maximum payload size in bytes. + extensions: List of extensions, applied in reverse order. + + Raises: + EOFError: If the connection is closed without a full WebSocket frame. + UnicodeDecodeError: If the frame contains invalid UTF-8. + PayloadTooBig: If the frame's payload size exceeds ``max_size``. + ProtocolError: If the frame contains incorrect values. + + """ + # Read the header. + data = yield from read_exact(2) + head1, head2 = struct.unpack("!BB", data) + + # While not Pythonic, this is marginally faster than calling bool(). + fin = True if head1 & 0b10000000 else False + rsv1 = True if head1 & 0b01000000 else False + rsv2 = True if head1 & 0b00100000 else False + rsv3 = True if head1 & 0b00010000 else False + + try: + opcode = Opcode(head1 & 0b00001111) + except ValueError as exc: + raise ProtocolError("invalid opcode") from exc + + if (True if head2 & 0b10000000 else False) != mask: + raise ProtocolError("incorrect masking") + + length = head2 & 0b01111111 + if length == 126: + data = yield from read_exact(2) + (length,) = struct.unpack("!H", data) + elif length == 127: + data = yield from read_exact(8) + (length,) = struct.unpack("!Q", data) + if max_size is not None and length > max_size: + raise PayloadTooBig(f"over size limit ({length} > {max_size} bytes)") + if mask: + mask_bytes = yield from read_exact(4) + + # Read the data. + data = yield from read_exact(length) + if mask: + data = apply_mask(data, mask_bytes) + + frame = cls(opcode, data, fin, rsv1, rsv2, rsv3) + + if extensions is None: + extensions = [] + for extension in reversed(extensions): + frame = extension.decode(frame, max_size=max_size) + + frame.check() + + return frame + + def serialize( + self, + *, + mask: bool, + extensions: Sequence[extensions.Extension] | None = None, + ) -> bytes: + """ + Serialize a WebSocket frame. + + Args: + mask: Whether the frame should be masked i.e. whether the write + happens on the client side. + extensions: List of extensions, applied in order. + + Raises: + ProtocolError: If the frame contains incorrect values. + + """ + self.check() + + if extensions is None: + extensions = [] + for extension in extensions: + self = extension.encode(self) + + output = io.BytesIO() + + # Prepare the header. + head1 = ( + (0b10000000 if self.fin else 0) + | (0b01000000 if self.rsv1 else 0) + | (0b00100000 if self.rsv2 else 0) + | (0b00010000 if self.rsv3 else 0) + | self.opcode + ) + + head2 = 0b10000000 if mask else 0 + + length = len(self.data) + if length < 126: + output.write(struct.pack("!BB", head1, head2 | length)) + elif length < 65536: + output.write(struct.pack("!BBH", head1, head2 | 126, length)) + else: + output.write(struct.pack("!BBQ", head1, head2 | 127, length)) + + if mask: + mask_bytes = secrets.token_bytes(4) + output.write(mask_bytes) + + # Prepare the data. + if mask: + data = apply_mask(self.data, mask_bytes) + else: + data = self.data + output.write(data) + + return output.getvalue() + + def check(self) -> None: + """ + Check that reserved bits and opcode have acceptable values. + + Raises: + ProtocolError: If a reserved bit or the opcode is invalid. + + """ + if self.rsv1 or self.rsv2 or self.rsv3: + raise ProtocolError("reserved bits must be 0") + + if self.opcode in CTRL_OPCODES: + if len(self.data) > 125: + raise ProtocolError("control frame too long") + if not self.fin: + raise ProtocolError("fragmented control frame") + + +@dataclasses.dataclass +class Close: + """ + Code and reason for WebSocket close frames. + + Attributes: + code: Close code. 
+ reason: Close reason. + + """ + + code: int + reason: str + + def __str__(self) -> str: + """ + Return a human-readable representation of a close code and reason. + + """ + if 3000 <= self.code < 4000: + explanation = "registered" + elif 4000 <= self.code < 5000: + explanation = "private use" + else: + explanation = CLOSE_CODE_EXPLANATIONS.get(self.code, "unknown") + result = f"{self.code} ({explanation})" + + if self.reason: + result = f"{result} {self.reason}" + + return result + + @classmethod + def parse(cls, data: bytes) -> Close: + """ + Parse the payload of a close frame. + + Args: + data: Payload of the close frame. + + Raises: + ProtocolError: If data is ill-formed. + UnicodeDecodeError: If the reason isn't valid UTF-8. + + """ + if len(data) >= 2: + (code,) = struct.unpack("!H", data[:2]) + reason = data[2:].decode() + close = cls(code, reason) + close.check() + return close + elif len(data) == 0: + return cls(CloseCode.NO_STATUS_RCVD, "") + else: + raise ProtocolError("close frame too short") + + def serialize(self) -> bytes: + """ + Serialize the payload of a close frame. + + """ + self.check() + return struct.pack("!H", self.code) + self.reason.encode() + + def check(self) -> None: + """ + Check that the close code has a valid value for a close frame. + + Raises: + ProtocolError: If the close code is invalid. + + """ + if not (self.code in EXTERNAL_CLOSE_CODES or 3000 <= self.code < 5000): + raise ProtocolError("invalid status code") + + +# At the bottom to break import cycles created by type annotations. +from . import extensions # noqa: E402 diff --git a/hackaton/lib/python3.12/site-packages/websockets/headers.py b/hackaton/lib/python3.12/site-packages/websockets/headers.py new file mode 100644 index 0000000..9103018 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/headers.py @@ -0,0 +1,579 @@ +from __future__ import annotations + +import base64 +import binascii +import ipaddress +import re +from typing import Callable, Sequence, TypeVar, cast + +from .exceptions import InvalidHeaderFormat, InvalidHeaderValue +from .typing import ( + ConnectionOption, + ExtensionHeader, + ExtensionName, + ExtensionParameter, + Subprotocol, + UpgradeProtocol, +) + + +__all__ = [ + "build_host", + "parse_connection", + "parse_upgrade", + "parse_extension", + "build_extension", + "parse_subprotocol", + "build_subprotocol", + "validate_subprotocols", + "build_www_authenticate_basic", + "parse_authorization_basic", + "build_authorization_basic", +] + + +T = TypeVar("T") + + +def build_host(host: str, port: int, secure: bool) -> str: + """ + Build a ``Host`` header. + + """ + # https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.2 + # IPv6 addresses must be enclosed in brackets. + try: + address = ipaddress.ip_address(host) + except ValueError: + # host is a hostname + pass + else: + # host is an IP address + if address.version == 6: + host = f"[{host}]" + + if port != (443 if secure else 80): + host = f"{host}:{port}" + + return host + + +# To avoid a dependency on a parsing library, we implement manually the ABNF +# described in https://datatracker.ietf.org/doc/html/rfc6455#section-9.1 and +# https://datatracker.ietf.org/doc/html/rfc7230#appendix-B. + + +def peek_ahead(header: str, pos: int) -> str | None: + """ + Return the next character from ``header`` at the given position. + + Return :obj:`None` at the end of ``header``. + + We never need to peek more than one character ahead. 
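frames.py is complete at this point, so here is a round-trip sketch, not part of the diff, showing how Frame.serialize and the generator-based Frame.parse fit together:

# Sketch: serialize a masked text frame, then parse it back by driving the
# generator-based coroutine by hand over an in-memory stream.
import io

from websockets.frames import OP_TEXT, Frame

frame = Frame(OP_TEXT, b"hi")
data = frame.serialize(mask=True)  # client-to-server frames are masked
stream = io.BytesIO(data)

def read_exact(n):
    # Generator-based coroutine: it never yields here, it just returns bytes.
    if False:
        yield
    b = stream.read(n)
    assert len(b) == n, "unexpected EOF"
    return b

gen = Frame.parse(read_exact, mask=True)
try:
    while True:
        next(gen)
except StopIteration as exc:
    parsed = exc.value

assert parsed.opcode is OP_TEXT and parsed.data == b"hi"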
+ + """ + return None if pos == len(header) else header[pos] + + +_OWS_re = re.compile(r"[\t ]*") + + +def parse_OWS(header: str, pos: int) -> int: + """ + Parse optional whitespace from ``header`` at the given position. + + Return the new position. + + The whitespace itself isn't returned because it isn't significant. + + """ + # There's always a match, possibly empty, whose content doesn't matter. + match = _OWS_re.match(header, pos) + assert match is not None + return match.end() + + +_token_re = re.compile(r"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+") + + +def parse_token(header: str, pos: int, header_name: str) -> tuple[str, int]: + """ + Parse a token from ``header`` at the given position. + + Return the token value and the new position. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + match = _token_re.match(header, pos) + if match is None: + raise InvalidHeaderFormat(header_name, "expected token", header, pos) + return match.group(), match.end() + + +_quoted_string_re = re.compile( + r'"(?:[\x09\x20-\x21\x23-\x5b\x5d-\x7e]|\\[\x09\x20-\x7e\x80-\xff])*"' +) + + +_unquote_re = re.compile(r"\\([\x09\x20-\x7e\x80-\xff])") + + +def parse_quoted_string(header: str, pos: int, header_name: str) -> tuple[str, int]: + """ + Parse a quoted string from ``header`` at the given position. + + Return the unquoted value and the new position. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + match = _quoted_string_re.match(header, pos) + if match is None: + raise InvalidHeaderFormat(header_name, "expected quoted string", header, pos) + return _unquote_re.sub(r"\1", match.group()[1:-1]), match.end() + + +_quotable_re = re.compile(r"[\x09\x20-\x7e\x80-\xff]*") + + +_quote_re = re.compile(r"([\x22\x5c])") + + +def build_quoted_string(value: str) -> str: + """ + Format ``value`` as a quoted string. + + This is the reverse of :func:`parse_quoted_string`. + + """ + match = _quotable_re.fullmatch(value) + if match is None: + raise ValueError("invalid characters for quoted-string encoding") + return '"' + _quote_re.sub(r"\\\1", value) + '"' + + +def parse_list( + parse_item: Callable[[str, int, str], tuple[T, int]], + header: str, + pos: int, + header_name: str, +) -> list[T]: + """ + Parse a comma-separated list from ``header`` at the given position. + + This is appropriate for parsing values with the following grammar: + + 1#item + + ``parse_item`` parses one item. + + ``header`` is assumed not to start or end with whitespace. + + (This function is designed for parsing an entire header value and + :func:`~websockets.http.read_headers` strips whitespace from values.) + + Return a list of items. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + # Per https://datatracker.ietf.org/doc/html/rfc7230#section-7, "a recipient + # MUST parse and ignore a reasonable number of empty list elements"; + # hence while loops that remove extra delimiters. + + # Remove extra delimiters before the first item. + while peek_ahead(header, pos) == ",": + pos = parse_OWS(header, pos + 1) + + items = [] + while True: + # Loop invariant: an item starts at pos in header. + item, pos = parse_item(header, pos, header_name) + items.append(item) + pos = parse_OWS(header, pos) + + # We may have reached the end of the header. + if pos == len(header): + break + + # There must be a delimiter after each element except the last one.
+ if peek_ahead(header, pos) == ",": + pos = parse_OWS(header, pos + 1) + else: + raise InvalidHeaderFormat(header_name, "expected comma", header, pos) + + # Remove extra delimiters before the next item. + while peek_ahead(header, pos) == ",": + pos = parse_OWS(header, pos + 1) + + # We may have reached the end of the header. + if pos == len(header): + break + + # Since we only advance in the header by one character with peek_ahead() + # or with the end position of a regex match, we can't overshoot the end. + assert pos == len(header) + + return items + + +def parse_connection_option( + header: str, pos: int, header_name: str +) -> tuple[ConnectionOption, int]: + """ + Parse a Connection option from ``header`` at the given position. + + Return the option value and the new position. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + item, pos = parse_token(header, pos, header_name) + return cast(ConnectionOption, item), pos + + +def parse_connection(header: str) -> list[ConnectionOption]: + """ + Parse a ``Connection`` header. + + Return a list of HTTP connection options. + + Args: + header: Value of the ``Connection`` header. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + return parse_list(parse_connection_option, header, 0, "Connection") + + +_protocol_re = re.compile( + r"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+(?:/[-!#$%&\'*+.^_`|~0-9a-zA-Z]+)?" +) + + +def parse_upgrade_protocol( + header: str, pos: int, header_name: str +) -> tuple[UpgradeProtocol, int]: + """ + Parse an Upgrade protocol from ``header`` at the given position. + + Return the protocol value and the new position. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + match = _protocol_re.match(header, pos) + if match is None: + raise InvalidHeaderFormat(header_name, "expected protocol", header, pos) + return cast(UpgradeProtocol, match.group()), match.end() + + +def parse_upgrade(header: str) -> list[UpgradeProtocol]: + """ + Parse an ``Upgrade`` header. + + Return a list of HTTP protocols. + + Args: + header: Value of the ``Upgrade`` header. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + return parse_list(parse_upgrade_protocol, header, 0, "Upgrade") + + +def parse_extension_item_param( + header: str, pos: int, header_name: str +) -> tuple[ExtensionParameter, int]: + """ + Parse a single extension parameter from ``header`` at the given position. + + Return a ``(name, value)`` pair and the new position. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + # Extract parameter name. + name, pos = parse_token(header, pos, header_name) + pos = parse_OWS(header, pos) + # Extract parameter value, if there is one. + value: str | None = None + if peek_ahead(header, pos) == "=": + pos = parse_OWS(header, pos + 1) + if peek_ahead(header, pos) == '"': + pos_before = pos # for proper error reporting below + value, pos = parse_quoted_string(header, pos, header_name) + # https://datatracker.ietf.org/doc/html/rfc6455#section-9.1 says: + # the value after quoted-string unescaping MUST conform to + # the 'token' ABNF. + if _token_re.fullmatch(value) is None: + raise InvalidHeaderFormat( + header_name, "invalid quoted header content", header, pos_before + ) + else: + value, pos = parse_token(header, pos, header_name) + pos = parse_OWS(header, pos) + + return (name, value), pos + + +def parse_extension_item( + header: str, pos: int, header_name: str +) -> tuple[ExtensionHeader, int]: + """ + Parse an extension definition from ``header`` at the given position.
+ + Return an ``(extension name, parameters)`` pair, where ``parameters`` is a + list of ``(name, value)`` pairs, and the new position. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + # Extract extension name. + name, pos = parse_token(header, pos, header_name) + pos = parse_OWS(header, pos) + # Extract all parameters. + parameters = [] + while peek_ahead(header, pos) == ";": + pos = parse_OWS(header, pos + 1) + parameter, pos = parse_extension_item_param(header, pos, header_name) + parameters.append(parameter) + return (cast(ExtensionName, name), parameters), pos + + +def parse_extension(header: str) -> list[ExtensionHeader]: + """ + Parse a ``Sec-WebSocket-Extensions`` header. + + Return a list of WebSocket extensions and their parameters in this format:: + + [ + ( + 'extension name', + [ + ('parameter name', 'parameter value'), + .... + ] + ), + ... + ] + + Parameter values are :obj:`None` when no value is provided. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + return parse_list(parse_extension_item, header, 0, "Sec-WebSocket-Extensions") + + +parse_extension_list = parse_extension # alias for backwards compatibility + + +def build_extension_item( + name: ExtensionName, parameters: list[ExtensionParameter] +) -> str: + """ + Build an extension definition. + + This is the reverse of :func:`parse_extension_item`. + + """ + return "; ".join( + [cast(str, name)] + + [ + # Quoted strings aren't necessary because values are always tokens. + name if value is None else f"{name}={value}" + for name, value in parameters + ] + ) + + +def build_extension(extensions: Sequence[ExtensionHeader]) -> str: + """ + Build a ``Sec-WebSocket-Extensions`` header. + + This is the reverse of :func:`parse_extension`. + + """ + return ", ".join( + build_extension_item(name, parameters) for name, parameters in extensions + ) + + +build_extension_list = build_extension # alias for backwards compatibility + + +def parse_subprotocol_item( + header: str, pos: int, header_name: str +) -> tuple[Subprotocol, int]: + """ + Parse a subprotocol from ``header`` at the given position. + + Return the subprotocol value and the new position. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + item, pos = parse_token(header, pos, header_name) + return cast(Subprotocol, item), pos + + +def parse_subprotocol(header: str) -> list[Subprotocol]: + """ + Parse a ``Sec-WebSocket-Protocol`` header. + + Return a list of WebSocket subprotocols. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + return parse_list(parse_subprotocol_item, header, 0, "Sec-WebSocket-Protocol") + + +parse_subprotocol_list = parse_subprotocol # alias for backwards compatibility + + +def build_subprotocol(subprotocols: Sequence[Subprotocol]) -> str: + """ + Build a ``Sec-WebSocket-Protocol`` header. + + This is the reverse of :func:`parse_subprotocol`. + + """ + return ", ".join(subprotocols) + + +build_subprotocol_list = build_subprotocol # alias for backwards compatibility + + +def validate_subprotocols(subprotocols: Sequence[Subprotocol]) -> None: + """ + Validate that ``subprotocols`` is suitable for :func:`build_subprotocol`. 
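A sketch, not part of the diff, of the round trip between parse_extension and build_extension defined above:

# Sketch: parsing and rebuilding a Sec-WebSocket-Extensions value.
from websockets.headers import build_extension, parse_extension

header = 'permessage-deflate; client_max_window_bits="12", bar; baz'
extensions = parse_extension(header)
# [('permessage-deflate', [('client_max_window_bits', '12')]),
#  ('bar', [('baz', None)])]
print(extensions)
# Quotes are dropped on the way back because values are always tokens:
print(build_extension(extensions))
# permessage-deflate; client_max_window_bits=12, bar; baz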
+ + """ + if not isinstance(subprotocols, Sequence): + raise TypeError("subprotocols must be a list") + if isinstance(subprotocols, str): + raise TypeError("subprotocols must be a list, not a str") + for subprotocol in subprotocols: + if not _token_re.fullmatch(subprotocol): + raise ValueError(f"invalid subprotocol: {subprotocol}") + + +def build_www_authenticate_basic(realm: str) -> str: + """ + Build a ``WWW-Authenticate`` header for HTTP Basic Auth. + + Args: + realm: Identifier of the protection space. + + """ + # https://datatracker.ietf.org/doc/html/rfc7617#section-2 + realm = build_quoted_string(realm) + charset = build_quoted_string("UTF-8") + return f"Basic realm={realm}, charset={charset}" + + +_token68_re = re.compile(r"[A-Za-z0-9-._~+/]+=*") + + +def parse_token68(header: str, pos: int, header_name: str) -> tuple[str, int]: + """ + Parse a token68 from ``header`` at the given position. + + Return the token value and the new position. + + Raises: + InvalidHeaderFormat: On invalid inputs. + + """ + match = _token68_re.match(header, pos) + if match is None: + raise InvalidHeaderFormat(header_name, "expected token68", header, pos) + return match.group(), match.end() + + +def parse_end(header: str, pos: int, header_name: str) -> None: + """ + Check that parsing reached the end of header. + + """ + if pos < len(header): + raise InvalidHeaderFormat(header_name, "trailing data", header, pos) + + +def parse_authorization_basic(header: str) -> tuple[str, str]: + """ + Parse an ``Authorization`` header for HTTP Basic Auth. + + Return a ``(username, password)`` tuple. + + Args: + header: Value of the ``Authorization`` header. + + Raises: + InvalidHeaderFormat: On invalid inputs. + InvalidHeaderValue: On unsupported inputs. + + """ + # https://datatracker.ietf.org/doc/html/rfc7235#section-2.1 + # https://datatracker.ietf.org/doc/html/rfc7617#section-2 + scheme, pos = parse_token(header, 0, "Authorization") + if scheme.lower() != "basic": + raise InvalidHeaderValue( + "Authorization", + f"unsupported scheme: {scheme}", + ) + if peek_ahead(header, pos) != " ": + raise InvalidHeaderFormat( + "Authorization", "expected space after scheme", header, pos + ) + pos += 1 + basic_credentials, pos = parse_token68(header, pos, "Authorization") + parse_end(header, pos, "Authorization") + + try: + user_pass = base64.b64decode(basic_credentials.encode()).decode() + except binascii.Error: + raise InvalidHeaderValue( + "Authorization", + "expected base64-encoded credentials", + ) from None + try: + username, password = user_pass.split(":", 1) + except ValueError: + raise InvalidHeaderValue( + "Authorization", + "expected username:password credentials", + ) from None + + return username, password + + +def build_authorization_basic(username: str, password: str) -> str: + """ + Build an ``Authorization`` header for HTTP Basic Auth. + + This is the reverse of :func:`parse_authorization_basic`. 
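A quick sketch, not part of the diff, of parse_authorization_basic at work; the header value is the base64 encoding of "hello:iloveyou", and build_authorization_basic, whose body follows, is its inverse:

# Sketch: parsing an Authorization header for HTTP Basic Auth.
from websockets.headers import parse_authorization_basic

header = "Basic aGVsbG86aWxvdmV5b3U="
assert parse_authorization_basic(header) == ("hello", "iloveyou")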
+ + """ + # https://datatracker.ietf.org/doc/html/rfc7617#section-2 + assert ":" not in username + user_pass = f"{username}:{password}" + basic_credentials = base64.b64encode(user_pass.encode()).decode() + return "Basic " + basic_credentials diff --git a/hackaton/lib/python3.12/site-packages/websockets/http.py b/hackaton/lib/python3.12/site-packages/websockets/http.py new file mode 100644 index 0000000..0ff5598 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/http.py @@ -0,0 +1,15 @@ +from __future__ import annotations + +import warnings + +from .datastructures import Headers, MultipleValuesError # noqa: F401 +from .legacy.http import read_request, read_response # noqa: F401 + + +warnings.warn( # deprecated in 9.0 - 2021-09-01 + "Headers and MultipleValuesError were moved " + "from websockets.http to websockets.datastructures " + "and read_request and read_response were moved " + "from websockets.http to websockets.legacy.http", + DeprecationWarning, +) diff --git a/hackaton/lib/python3.12/site-packages/websockets/http11.py b/hackaton/lib/python3.12/site-packages/websockets/http11.py new file mode 100644 index 0000000..47cef7a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/http11.py @@ -0,0 +1,385 @@ +from __future__ import annotations + +import dataclasses +import os +import re +import sys +import warnings +from typing import Callable, Generator + +from .datastructures import Headers +from .exceptions import SecurityError +from .version import version as websockets_version + + +__all__ = ["SERVER", "USER_AGENT", "Request", "Response"] + + +PYTHON_VERSION = "{}.{}".format(*sys.version_info) + +# User-Agent header for HTTP requests. +USER_AGENT = os.environ.get( + "WEBSOCKETS_USER_AGENT", + f"Python/{PYTHON_VERSION} websockets/{websockets_version}", +) + +# Server header for HTTP responses. +SERVER = os.environ.get( + "WEBSOCKETS_SERVER", + f"Python/{PYTHON_VERSION} websockets/{websockets_version}", +) + +# Maximum total size of headers is around 128 * 8 KiB = 1 MiB. +MAX_NUM_HEADERS = int(os.environ.get("WEBSOCKETS_MAX_NUM_HEADERS", "128")) + +# Limit request line and header lines. 8 KiB is the most common default +# configuration of popular HTTP servers. +MAX_LINE_LENGTH = int(os.environ.get("WEBSOCKETS_MAX_LINE_LENGTH", "8192")) + +# Support for HTTP response bodies is intended to read an error message +# returned by a server. It isn't designed to perform large file transfers. +MAX_BODY_SIZE = int(os.environ.get("WEBSOCKETS_MAX_BODY_SIZE", "1_048_576")) # 1 MiB + + +def d(value: bytes) -> str: + """ + Decode a bytestring for interpolating into an error message. + + """ + return value.decode(errors="backslashreplace") + + +# See https://datatracker.ietf.org/doc/html/rfc7230#appendix-B. + +# Regex for validating header names. + +_token_re = re.compile(rb"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+") + +# Regex for validating header values. + +# We don't attempt to support obsolete line folding. + +# Include HTAB (\x09), SP (\x20), VCHAR (\x21-\x7e), obs-text (\x80-\xff). + +# The ABNF is complicated because it attempts to express that optional +# whitespace is ignored. We strip whitespace and don't revalidate that. + +# See also https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 + +_value_re = re.compile(rb"[\x09\x20-\x7e\x80-\xff]*") + + +@dataclasses.dataclass +class Request: + """ + WebSocket handshake request. + + Attributes: + path: Request path, including optional query. + headers: Request headers.
+ """ + + path: str + headers: Headers + # body isn't useful in the context of this library. + + _exception: Exception | None = None + + @property + def exception(self) -> Exception | None: # pragma: no cover + warnings.warn( # deprecated in 10.3 - 2022-04-17 + "Request.exception is deprecated; " + "use ServerProtocol.handshake_exc instead", + DeprecationWarning, + ) + return self._exception + + @classmethod + def parse( + cls, + read_line: Callable[[int], Generator[None, None, bytes]], + ) -> Generator[None, None, Request]: + """ + Parse a WebSocket handshake request. + + This is a generator-based coroutine. + + The request path isn't URL-decoded or validated in any way. + + The request path and headers are expected to contain only ASCII + characters. Other characters are represented with surrogate escapes. + + :meth:`parse` doesn't attempt to read the request body because + WebSocket handshake requests don't have one. If the request contains a + body, it may be read from the data stream after :meth:`parse` returns. + + Args: + read_line: Generator-based coroutine that reads a LF-terminated + line or raises an exception if there isn't enough data. + + Raises: + EOFError: If the connection is closed without a full HTTP request. + SecurityError: If the request exceeds a security limit. + ValueError: If the request isn't well formatted. + + """ + # https://datatracker.ietf.org/doc/html/rfc7230#section-3.1.1 + + # Parsing is simple because fixed values are expected for method and + # version and because path isn't checked. Since WebSocket software tends + # to implement HTTP/1.1 strictly, there's little need for lenient parsing. + + try: + request_line = yield from parse_line(read_line) + except EOFError as exc: + raise EOFError("connection closed while reading HTTP request line") from exc + + try: + method, raw_path, protocol = request_line.split(b" ", 2) + except ValueError: # not enough values to unpack (expected 3, got 1-2) + raise ValueError(f"invalid HTTP request line: {d(request_line)}") from None + if protocol != b"HTTP/1.1": + raise ValueError( + f"unsupported protocol; expected HTTP/1.1: {d(request_line)}" + ) + if method != b"GET": + raise ValueError(f"unsupported HTTP method; expected GET; got {d(method)}") + path = raw_path.decode("ascii", "surrogateescape") + + headers = yield from parse_headers(read_line) + + # https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3 + + if "Transfer-Encoding" in headers: + raise NotImplementedError("transfer codings aren't supported") + + if "Content-Length" in headers: + raise ValueError("unsupported request body") + + return cls(path, headers) + + def serialize(self) -> bytes: + """ + Serialize a WebSocket handshake request. + + """ + # Since the request line and headers only contain ASCII characters, + # we can keep this simple. + request = f"GET {self.path} HTTP/1.1\r\n".encode() + request += self.headers.serialize() + return request + + +@dataclasses.dataclass +class Response: + """ + WebSocket handshake response. + + Attributes: + status_code: Response code. + reason_phrase: Response reason. + headers: Response headers. + body: Response body, if any.
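Request.parse above is a generator-based coroutine, so a test can drive it with a plain in-memory reader. A sketch, not part of the diff:

# Sketch: parsing a handshake request from an in-memory stream.
import io

from websockets.http11 import Request

stream = io.BytesIO(b"GET /chat HTTP/1.1\r\nHost: example.com\r\n\r\n")

def read_line(limit):
    # Generator-based coroutine: it never yields here, it returns one line.
    if False:
        yield
    return stream.readline(limit)

gen = Request.parse(read_line)
try:
    while True:
        next(gen)
except StopIteration as exc:
    request = exc.value

assert request.path == "/chat"
assert request.headers["Host"] == "example.com"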
+ + """ + + status_code: int + reason_phrase: str + headers: Headers + body: bytes | None = None + + _exception: Exception | None = None + + @property + def exception(self) -> Exception | None: # pragma: no cover + warnings.warn( # deprecated in 10.3 - 2022-04-17 + "Response.exception is deprecated; " + "use ClientProtocol.handshake_exc instead", + DeprecationWarning, + ) + return self._exception + + @classmethod + def parse( + cls, + read_line: Callable[[int], Generator[None, None, bytes]], + read_exact: Callable[[int], Generator[None, None, bytes]], + read_to_eof: Callable[[int], Generator[None, None, bytes]], + ) -> Generator[None, None, Response]: + """ + Parse a WebSocket handshake response. + + This is a generator-based coroutine. + + The reason phrase and headers are expected to contain only ASCII + characters. Other characters are represented with surrogate escapes. + + Args: + read_line: Generator-based coroutine that reads a LF-terminated + line or raises an exception if there isn't enough data. + read_exact: Generator-based coroutine that reads the requested + bytes or raises an exception if there isn't enough data. + read_to_eof: Generator-based coroutine that reads until the end + of the stream. + + Raises: + EOFError: If the connection is closed without a full HTTP response. + SecurityError: If the response exceeds a security limit. + LookupError: If the response isn't well formatted. + ValueError: If the response isn't well formatted. + + """ + # https://datatracker.ietf.org/doc/html/rfc7230#section-3.1.2 + + try: + status_line = yield from parse_line(read_line) + except EOFError as exc: + raise EOFError("connection closed while reading HTTP status line") from exc + + try: + protocol, raw_status_code, raw_reason = status_line.split(b" ", 2) + except ValueError: # not enough values to unpack (expected 3, got 1-2) + raise ValueError(f"invalid HTTP status line: {d(status_line)}") from None + if protocol != b"HTTP/1.1": + raise ValueError( + f"unsupported protocol; expected HTTP/1.1: {d(status_line)}" + ) + try: + status_code = int(raw_status_code) + except ValueError: # invalid literal for int() with base 10 + raise ValueError( + f"invalid status code; expected integer; got {d(raw_status_code)}" + ) from None + if not 100 <= status_code < 600: + raise ValueError( + f"invalid status code; expected 100–599; got {d(raw_status_code)}" + ) + if not _value_re.fullmatch(raw_reason): + raise ValueError(f"invalid HTTP reason phrase: {d(raw_reason)}") + reason = raw_reason.decode("ascii", "surrogateescape") + + headers = yield from parse_headers(read_line) + + # https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3 + + if "Transfer-Encoding" in headers: + raise NotImplementedError("transfer codings aren't supported") + + # Since websockets only does GET requests (no HEAD, no CONNECT), all + # responses except 1xx, 204, and 304 include a message body. + if 100 <= status_code < 200 or status_code == 204 or status_code == 304: + body = None + else: + content_length: int | None + try: + # MultipleValuesError is sufficiently unlikely that we don't + # attempt to handle it. Instead we document that its parent + # class, LookupError, may be raised. 
+ raw_content_length = headers["Content-Length"] + except KeyError: + content_length = None + else: + content_length = int(raw_content_length) + + if content_length is None: + try: + body = yield from read_to_eof(MAX_BODY_SIZE) + except RuntimeError: + raise SecurityError(f"body too large: over {MAX_BODY_SIZE} bytes") + elif content_length > MAX_BODY_SIZE: + raise SecurityError(f"body too large: {content_length} bytes") + else: + body = yield from read_exact(content_length) + + return cls(status_code, reason, headers, body) + + def serialize(self) -> bytes: + """ + Serialize a WebSocket handshake response. + + """ + # Since the status line and headers only contain ASCII characters, + # we can keep this simple. + response = f"HTTP/1.1 {self.status_code} {self.reason_phrase}\r\n".encode() + response += self.headers.serialize() + if self.body is not None: + response += self.body + return response + + +def parse_headers( + read_line: Callable[[int], Generator[None, None, bytes]], +) -> Generator[None, None, Headers]: + """ + Parse HTTP headers. + + Non-ASCII characters are represented with surrogate escapes. + + Args: + read_line: Generator-based coroutine that reads a LF-terminated line + or raises an exception if there isn't enough data. + + Raises: + EOFError: If the connection is closed without complete headers. + SecurityError: If the request exceeds a security limit. + ValueError: If the request isn't well formatted. + + """ + # https://datatracker.ietf.org/doc/html/rfc7230#section-3.2 + + # We don't attempt to support obsolete line folding. + + headers = Headers() + for _ in range(MAX_NUM_HEADERS + 1): + try: + line = yield from parse_line(read_line) + except EOFError as exc: + raise EOFError("connection closed while reading HTTP headers") from exc + if line == b"": + break + + try: + raw_name, raw_value = line.split(b":", 1) + except ValueError: # not enough values to unpack (expected 2, got 1) + raise ValueError(f"invalid HTTP header line: {d(line)}") from None + if not _token_re.fullmatch(raw_name): + raise ValueError(f"invalid HTTP header name: {d(raw_name)}") + raw_value = raw_value.strip(b" \t") + if not _value_re.fullmatch(raw_value): + raise ValueError(f"invalid HTTP header value: {d(raw_value)}") + + name = raw_name.decode("ascii") # guaranteed to be ASCII at this point + value = raw_value.decode("ascii", "surrogateescape") + headers[name] = value + + else: + raise SecurityError("too many HTTP headers") + + return headers + + +def parse_line( + read_line: Callable[[int], Generator[None, None, bytes]], +) -> Generator[None, None, bytes]: + """ + Parse a single line. + + CRLF is stripped from the return value. + + Args: + read_line: Generator-based coroutine that reads a LF-terminated line + or raises an exception if there isn't enough data. + + Raises: + EOFError: If the connection is closed without a CRLF. + SecurityError: If the response exceeds a security limit. 
+ + """ + try: + line = yield from read_line(MAX_LINE_LENGTH) + except RuntimeError: + raise SecurityError("line too long") + # Not mandatory but safe - https://datatracker.ietf.org/doc/html/rfc7230#section-3.5 + if not line.endswith(b"\r\n"): + raise EOFError("line without CRLF") + return line[:-2] diff --git a/hackaton/lib/python3.12/site-packages/websockets/imports.py b/hackaton/lib/python3.12/site-packages/websockets/imports.py new file mode 100644 index 0000000..bb80e4e --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/imports.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +import warnings +from typing import Any, Iterable + + +__all__ = ["lazy_import"] + + +def import_name(name: str, source: str, namespace: dict[str, Any]) -> Any: + """ + Import ``name`` from ``source`` in ``namespace``. + + There are two use cases: + + - ``name`` is an object defined in ``source``; + - ``name`` is a submodule of ``source``. + + Neither :func:`__import__` nor :func:`~importlib.import_module` does + exactly this. :func:`__import__` is closer to the intended behavior. + + """ + level = 0 + while source[level] == ".": + level += 1 + assert level < len(source), "importing from parent isn't supported" + module = __import__(source[level:], namespace, None, [name], level) + return getattr(module, name) + + +def lazy_import( + namespace: dict[str, Any], + aliases: dict[str, str] | None = None, + deprecated_aliases: dict[str, str] | None = None, +) -> None: + """ + Provide lazy, module-level imports. + + Typical use:: + + __getattr__, __dir__ = lazy_import( + globals(), + aliases={ + "<name>": "<source module>", + ... + }, + deprecated_aliases={ + ..., + } + ) + + This function defines ``__getattr__`` and ``__dir__`` per :pep:`562`. + + """ + if aliases is None: + aliases = {} + if deprecated_aliases is None: + deprecated_aliases = {} + + namespace_set = set(namespace) + aliases_set = set(aliases) + deprecated_aliases_set = set(deprecated_aliases) + + assert not namespace_set & aliases_set, "namespace conflict" + assert not namespace_set & deprecated_aliases_set, "namespace conflict" + assert not aliases_set & deprecated_aliases_set, "namespace conflict" + + package = namespace["__name__"] + + def __getattr__(name: str) -> Any: + assert aliases is not None # mypy cannot figure this out + try: + source = aliases[name] + except KeyError: + pass + else: + return import_name(name, source, namespace) + + assert deprecated_aliases is not None # mypy cannot figure this out + try: + source = deprecated_aliases[name] + except KeyError: + pass + else: + warnings.warn( + f"{package}.{name} is deprecated", + DeprecationWarning, + stacklevel=2, + ) + return import_name(name, source, namespace) + + raise AttributeError(f"module {package!r} has no attribute {name!r}") + + namespace["__getattr__"] = __getattr__ + + def __dir__() -> Iterable[str]: + return sorted(namespace_set | aliases_set | deprecated_aliases_set) + + namespace["__dir__"] = __dir__ diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/__init__.py b/hackaton/lib/python3.12/site-packages/websockets/legacy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..75b30a9 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/__init__.cpython-312.pyc differ diff
--git a/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/auth.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/auth.cpython-312.pyc new file mode 100644 index 0000000..2e29489 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/auth.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/client.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/client.cpython-312.pyc new file mode 100644 index 0000000..0f126a0 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/client.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/exceptions.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000..d1c2eed Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/exceptions.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/framing.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/framing.cpython-312.pyc new file mode 100644 index 0000000..28a3350 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/framing.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/handshake.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/handshake.cpython-312.pyc new file mode 100644 index 0000000..2134c02 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/handshake.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/http.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/http.cpython-312.pyc new file mode 100644 index 0000000..5be92d4 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/http.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/protocol.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/protocol.cpython-312.pyc new file mode 100644 index 0000000..a586bd7 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/protocol.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/server.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/server.cpython-312.pyc new file mode 100644 index 0000000..dfd8180 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/legacy/__pycache__/server.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/auth.py b/hackaton/lib/python3.12/site-packages/websockets/legacy/auth.py new file mode 100644 index 0000000..4d030e5 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/legacy/auth.py @@ -0,0 +1,190 @@ +from __future__ import annotations + +import functools +import hmac +import http +from typing import Any, Awaitable, Callable, Iterable, Tuple, cast + +from ..datastructures import Headers +from ..exceptions import InvalidHeader +from ..headers import build_www_authenticate_basic, parse_authorization_basic 
+from .server import HTTPResponse, WebSocketServerProtocol + + +__all__ = ["BasicAuthWebSocketServerProtocol", "basic_auth_protocol_factory"] + +# Change to tuple[str, str] when dropping Python < 3.9. +Credentials = Tuple[str, str] + + +def is_credentials(value: Any) -> bool: + try: + username, password = value + except (TypeError, ValueError): + return False + else: + return isinstance(username, str) and isinstance(password, str) + + +class BasicAuthWebSocketServerProtocol(WebSocketServerProtocol): + """ + WebSocket server protocol that enforces HTTP Basic Auth. + + """ + + realm: str = "" + """ + Scope of protection. + + If provided, it should contain only ASCII characters because the + encoding of non-ASCII characters is undefined. + """ + + username: str | None = None + """Username of the authenticated user.""" + + def __init__( + self, + *args: Any, + realm: str | None = None, + check_credentials: Callable[[str, str], Awaitable[bool]] | None = None, + **kwargs: Any, + ) -> None: + if realm is not None: + self.realm = realm # shadow class attribute + self._check_credentials = check_credentials + super().__init__(*args, **kwargs) + + async def check_credentials(self, username: str, password: str) -> bool: + """ + Check whether credentials are authorized. + + This coroutine may be overridden in a subclass, for example to + authenticate against a database or an external service. + + Args: + username: HTTP Basic Auth username. + password: HTTP Basic Auth password. + + Returns: + :obj:`True` if the handshake should continue; + :obj:`False` if it should fail with an HTTP 401 error. + + """ + if self._check_credentials is not None: + return await self._check_credentials(username, password) + + return False + + async def process_request( + self, + path: str, + request_headers: Headers, + ) -> HTTPResponse | None: + """ + Check HTTP Basic Auth and return an HTTP 401 response if needed. + + """ + try: + authorization = request_headers["Authorization"] + except KeyError: + return ( + http.HTTPStatus.UNAUTHORIZED, + [("WWW-Authenticate", build_www_authenticate_basic(self.realm))], + b"Missing credentials\n", + ) + + try: + username, password = parse_authorization_basic(authorization) + except InvalidHeader: + return ( + http.HTTPStatus.UNAUTHORIZED, + [("WWW-Authenticate", build_www_authenticate_basic(self.realm))], + b"Unsupported credentials\n", + ) + + if not await self.check_credentials(username, password): + return ( + http.HTTPStatus.UNAUTHORIZED, + [("WWW-Authenticate", build_www_authenticate_basic(self.realm))], + b"Invalid credentials\n", + ) + + self.username = username + + return await super().process_request(path, request_headers) + + +def basic_auth_protocol_factory( + realm: str | None = None, + credentials: Credentials | Iterable[Credentials] | None = None, + check_credentials: Callable[[str, str], Awaitable[bool]] | None = None, + create_protocol: Callable[..., BasicAuthWebSocketServerProtocol] | None = None, +) -> Callable[..., BasicAuthWebSocketServerProtocol]: + """ + Protocol factory that enforces HTTP Basic Auth. + + :func:`basic_auth_protocol_factory` is designed to integrate with + :func:`~websockets.legacy.server.serve` like this:: + + serve( + ..., + create_protocol=basic_auth_protocol_factory( + realm="my dev server", + credentials=("hello", "iloveyou"), + ) + ) + + Args: + realm: Scope of protection. It should contain only ASCII characters + because the encoding of non-ASCII characters is undefined. + Refer to section 2.2 of :rfc:`7235` for details. 
+ credentials: Hard coded authorized credentials. It can be a + ``(username, password)`` pair or a list of such pairs. + check_credentials: Coroutine that verifies credentials. + It receives ``username`` and ``password`` arguments + and returns a :class:`bool`. One of ``credentials`` or + ``check_credentials`` must be provided but not both. + create_protocol: Factory that creates the protocol. By default, this + is :class:`BasicAuthWebSocketServerProtocol`. It can be replaced + by a subclass. + Raises: + TypeError: If the ``credentials`` or ``check_credentials`` argument is + wrong. + + """ + if (credentials is None) == (check_credentials is None): + raise TypeError("provide either credentials or check_credentials") + + if credentials is not None: + if is_credentials(credentials): + credentials_list = [cast(Credentials, credentials)] + elif isinstance(credentials, Iterable): + credentials_list = list(cast(Iterable[Credentials], credentials)) + if not all(is_credentials(item) for item in credentials_list): + raise TypeError(f"invalid credentials argument: {credentials}") + else: + raise TypeError(f"invalid credentials argument: {credentials}") + + credentials_dict = dict(credentials_list) + + async def check_credentials(username: str, password: str) -> bool: + try: + expected_password = credentials_dict[username] + except KeyError: + return False + return hmac.compare_digest(expected_password, password) + + if create_protocol is None: + create_protocol = BasicAuthWebSocketServerProtocol + + # Help mypy and avoid this error: "type[BasicAuthWebSocketServerProtocol] | + # Callable[..., BasicAuthWebSocketServerProtocol]" not callable [misc] + create_protocol = cast( + Callable[..., BasicAuthWebSocketServerProtocol], create_protocol + ) + return functools.partial( + create_protocol, + realm=realm, + check_credentials=check_credentials, + ) diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/client.py b/hackaton/lib/python3.12/site-packages/websockets/legacy/client.py new file mode 100644 index 0000000..ec4c2ff --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/legacy/client.py @@ -0,0 +1,707 @@ +from __future__ import annotations + +import asyncio +import functools +import logging +import os +import random +import urllib.parse +import warnings +from types import TracebackType +from typing import ( + Any, + AsyncIterator, + Callable, + Generator, + Sequence, + cast, +) + +from ..asyncio.compatibility import asyncio_timeout +from ..datastructures import Headers, HeadersLike +from ..exceptions import ( + InvalidHeader, + InvalidHeaderValue, + NegotiationError, + SecurityError, +) +from ..extensions import ClientExtensionFactory, Extension +from ..extensions.permessage_deflate import enable_client_permessage_deflate +from ..headers import ( + build_authorization_basic, + build_extension, + build_host, + build_subprotocol, + parse_extension, + parse_subprotocol, + validate_subprotocols, +) +from ..http11 import USER_AGENT +from ..typing import ExtensionHeader, LoggerLike, Origin, Subprotocol +from ..uri import WebSocketURI, parse_uri +from .exceptions import InvalidMessage, InvalidStatusCode, RedirectHandshake +from .handshake import build_request, check_response +from .http import read_response +from .protocol import WebSocketCommonProtocol + + +__all__ = ["connect", "unix_connect", "WebSocketClientProtocol"] + + +class WebSocketClientProtocol(WebSocketCommonProtocol): + """ + WebSocket client connection. 
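basic_auth_protocol_factory also accepts a coroutine instead of fixed pairs; a sketch, not part of the diff, wiring it to a hypothetical user store (USERS is invented for the example):

# Sketch: verifying credentials with a coroutine; USERS is hypothetical.
import hmac

from websockets.legacy.auth import basic_auth_protocol_factory

USERS = {"hello": "iloveyou"}

async def check_credentials(username: str, password: str) -> bool:
    expected = USERS.get(username)
    if expected is None:
        return False
    # Compare in constant time, as the built-in path above does.
    return hmac.compare_digest(expected, password)

create_protocol = basic_auth_protocol_factory(
    realm="demo",
    check_credentials=check_credentials,
)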
+ + :class:`WebSocketClientProtocol` provides :meth:`recv` and :meth:`send` + coroutines for receiving and sending messages. + + It supports asynchronous iteration to receive messages:: + + async for message in websocket: + await process(message) + + The iterator exits normally when the connection is closed with close code + 1000 (OK) or 1001 (going away) or without a close code. It raises + a :exc:`~websockets.exceptions.ConnectionClosedError` when the connection + is closed with any other code. + + See :func:`connect` for the documentation of ``logger``, ``origin``, + ``extensions``, ``subprotocols``, ``extra_headers``, and + ``user_agent_header``. + + See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the + documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``, + ``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``. + + """ + + is_client = True + side = "client" + + def __init__( + self, + *, + logger: LoggerLike | None = None, + origin: Origin | None = None, + extensions: Sequence[ClientExtensionFactory] | None = None, + subprotocols: Sequence[Subprotocol] | None = None, + extra_headers: HeadersLike | None = None, + user_agent_header: str | None = USER_AGENT, + **kwargs: Any, + ) -> None: + if logger is None: + logger = logging.getLogger("websockets.client") + super().__init__(logger=logger, **kwargs) + self.origin = origin + self.available_extensions = extensions + self.available_subprotocols = subprotocols + self.extra_headers = extra_headers + self.user_agent_header = user_agent_header + + def write_http_request(self, path: str, headers: Headers) -> None: + """ + Write request line and headers to the HTTP request. + + """ + self.path = path + self.request_headers = headers + + if self.debug: + self.logger.debug("> GET %s HTTP/1.1", path) + for key, value in headers.raw_items(): + self.logger.debug("> %s: %s", key, value) + + # Since the path and headers only contain ASCII characters, + # we can keep this simple. + request = f"GET {path} HTTP/1.1\r\n" + request += str(headers) + + self.transport.write(request.encode()) + + async def read_http_response(self) -> tuple[int, Headers]: + """ + Read status line and headers from the HTTP response. + + If the response contains a body, it may be read from ``self.reader`` + after this coroutine returns. + + Raises: + InvalidMessage: If the HTTP message is malformed or isn't an + HTTP/1.1 GET response. + + """ + try: + status_code, reason, headers = await read_response(self.reader) + except Exception as exc: + raise InvalidMessage("did not receive a valid HTTP response") from exc + + if self.debug: + self.logger.debug("< HTTP/1.1 %d %s", status_code, reason) + for key, value in headers.raw_items(): + self.logger.debug("< %s: %s", key, value) + + self.response_headers = headers + + return status_code, self.response_headers + + @staticmethod + def process_extensions( + headers: Headers, + available_extensions: Sequence[ClientExtensionFactory] | None, + ) -> list[Extension]: + """ + Handle the Sec-WebSocket-Extensions HTTP response header. + + Check that each extension is supported, as well as its parameters. + + Return the list of accepted extensions. + + Raise :exc:`~websockets.exceptions.InvalidHandshake` to abort the + connection. + + :rfc:`6455` leaves the rules up to the specification of each + extension. + + To provide this level of flexibility, for each extension accepted by + the server, we check for a match with each extension available in the + client configuration.
If no match is found, an exception is raised. + + If several variants of the same extension are accepted by the server, + it may be configured several times, which won't make sense in general. + Extensions must implement their own requirements. For this purpose, + the list of previously accepted extensions is provided. + + Other requirements, for example related to mandatory extensions or the + order of extensions, may be implemented by overriding this method. + + """ + accepted_extensions: list[Extension] = [] + + header_values = headers.get_all("Sec-WebSocket-Extensions") + + if header_values: + if available_extensions is None: + raise NegotiationError("no extensions supported") + + parsed_header_values: list[ExtensionHeader] = sum( + [parse_extension(header_value) for header_value in header_values], [] + ) + + for name, response_params in parsed_header_values: + for extension_factory in available_extensions: + # Skip non-matching extensions based on their name. + if extension_factory.name != name: + continue + + # Skip non-matching extensions based on their params. + try: + extension = extension_factory.process_response_params( + response_params, accepted_extensions + ) + except NegotiationError: + continue + + # Add matching extension to the final list. + accepted_extensions.append(extension) + + # Break out of the loop once we have a match. + break + + # If we didn't break from the loop, no extension in our list + # matched what the server sent. Fail the connection. + else: + raise NegotiationError( + f"Unsupported extension: " + f"name = {name}, params = {response_params}" + ) + + return accepted_extensions + + @staticmethod + def process_subprotocol( + headers: Headers, available_subprotocols: Sequence[Subprotocol] | None + ) -> Subprotocol | None: + """ + Handle the Sec-WebSocket-Protocol HTTP response header. + + Check that it contains exactly one supported subprotocol. + + Return the selected subprotocol. + + """ + subprotocol: Subprotocol | None = None + + header_values = headers.get_all("Sec-WebSocket-Protocol") + + if header_values: + if available_subprotocols is None: + raise NegotiationError("no subprotocols supported") + + parsed_header_values: Sequence[Subprotocol] = sum( + [parse_subprotocol(header_value) for header_value in header_values], [] + ) + + if len(parsed_header_values) > 1: + raise InvalidHeaderValue( + "Sec-WebSocket-Protocol", + f"multiple values: {', '.join(parsed_header_values)}", + ) + + subprotocol = parsed_header_values[0] + + if subprotocol not in available_subprotocols: + raise NegotiationError(f"unsupported subprotocol: {subprotocol}") + + return subprotocol + + async def handshake( + self, + wsuri: WebSocketURI, + origin: Origin | None = None, + available_extensions: Sequence[ClientExtensionFactory] | None = None, + available_subprotocols: Sequence[Subprotocol] | None = None, + extra_headers: HeadersLike | None = None, + ) -> None: + """ + Perform the client side of the opening handshake. + + Args: + wsuri: URI of the WebSocket server. + origin: Value of the ``Origin`` header. + extensions: List of supported extensions, in order in which they + should be negotiated and run. + subprotocols: List of supported subprotocols, in order of decreasing + preference. + extra_headers: Arbitrary HTTP headers to add to the handshake request. + + Raises: + InvalidHandshake: If the handshake fails. 
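+
+        A typical request written by this coroutine looks like the following
+        sketch, adapted from the example in :rfc:`6455` (the path, host, and
+        key are illustrative)::
+
+            GET /chat HTTP/1.1
+            Host: server.example.com
+            Upgrade: websocket
+            Connection: Upgrade
+            Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
+            Sec-WebSocket-Version: 13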
+ + """ + request_headers = Headers() + + request_headers["Host"] = build_host(wsuri.host, wsuri.port, wsuri.secure) + + if wsuri.user_info: + request_headers["Authorization"] = build_authorization_basic( + *wsuri.user_info + ) + + if origin is not None: + request_headers["Origin"] = origin + + key = build_request(request_headers) + + if available_extensions is not None: + extensions_header = build_extension( + [ + (extension_factory.name, extension_factory.get_request_params()) + for extension_factory in available_extensions + ] + ) + request_headers["Sec-WebSocket-Extensions"] = extensions_header + + if available_subprotocols is not None: + protocol_header = build_subprotocol(available_subprotocols) + request_headers["Sec-WebSocket-Protocol"] = protocol_header + + if self.extra_headers is not None: + request_headers.update(self.extra_headers) + + if self.user_agent_header: + request_headers.setdefault("User-Agent", self.user_agent_header) + + self.write_http_request(wsuri.resource_name, request_headers) + + status_code, response_headers = await self.read_http_response() + if status_code in (301, 302, 303, 307, 308): + if "Location" not in response_headers: + raise InvalidHeader("Location") + raise RedirectHandshake(response_headers["Location"]) + elif status_code != 101: + raise InvalidStatusCode(status_code, response_headers) + + check_response(response_headers, key) + + self.extensions = self.process_extensions( + response_headers, available_extensions + ) + + self.subprotocol = self.process_subprotocol( + response_headers, available_subprotocols + ) + + self.connection_open() + + +class Connect: + """ + Connect to the WebSocket server at ``uri``. + + Awaiting :func:`connect` yields a :class:`WebSocketClientProtocol` which + can then be used to send and receive messages. + + :func:`connect` can be used as a asynchronous context manager:: + + async with connect(...) as websocket: + ... + + The connection is closed automatically when exiting the context. + + :func:`connect` can be used as an infinite asynchronous iterator to + reconnect automatically on errors:: + + async for websocket in connect(...): + try: + ... + except websockets.ConnectionClosed: + continue + + The connection is closed automatically after each iteration of the loop. + + If an error occurs while establishing the connection, :func:`connect` + retries with exponential backoff. The backoff delay starts at three + seconds and increases up to one minute. + + If an error occurs in the body of the loop, you can handle the exception + and :func:`connect` will reconnect with the next iteration; or you can + let the exception bubble up and break out of the loop. This lets you + decide which errors trigger a reconnection and which errors are fatal. + + Args: + uri: URI of the WebSocket server. + create_protocol: Factory for the :class:`asyncio.Protocol` managing + the connection. It defaults to :class:`WebSocketClientProtocol`. + Set it to a wrapper or a subclass to customize connection handling. + logger: Logger for this client. + It defaults to ``logging.getLogger("websockets.client")``. + See the :doc:`logging guide <../../topics/logging>` for details. + compression: The "permessage-deflate" extension is enabled by default. + Set ``compression`` to :obj:`None` to disable it. See the + :doc:`compression guide <../../topics/compression>` for details. + origin: Value of the ``Origin`` header, for servers that require it. + extensions: List of supported extensions, in order in which they + should be negotiated and run. 
+        subprotocols: List of supported subprotocols, in order of decreasing
+            preference.
+        extra_headers: Arbitrary HTTP headers to add to the handshake request.
+        user_agent_header: Value of the ``User-Agent`` request header.
+            It defaults to ``"Python/x.y.z websockets/X.Y"``.
+            Setting it to :obj:`None` removes the header.
+        open_timeout: Timeout for opening the connection in seconds.
+            :obj:`None` disables the timeout.
+
+    See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the
+    documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``,
+    ``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``.
+
+    Any other keyword arguments are passed to the event loop's
+    :meth:`~asyncio.loop.create_connection` method.
+
+    For example:
+
+    * You can set ``ssl`` to a :class:`~ssl.SSLContext` to enforce TLS
+      settings. When connecting to a ``wss://`` URI, if ``ssl`` isn't
+      provided, a TLS context is created
+      with :func:`~ssl.create_default_context`.
+
+    * You can set ``host`` and ``port`` to connect to a different host and
+      port from those found in ``uri``. This only changes the destination of
+      the TCP connection. The host name from ``uri`` is still used in the TLS
+      handshake for secure connections and in the ``Host`` header.
+
+    Raises:
+        InvalidURI: If ``uri`` isn't a valid WebSocket URI.
+        OSError: If the TCP connection fails.
+        InvalidHandshake: If the opening handshake fails.
+        ~asyncio.TimeoutError: If the opening handshake times out.
+
+    """
+
+    MAX_REDIRECTS_ALLOWED = int(os.environ.get("WEBSOCKETS_MAX_REDIRECTS", "10"))
+
+    def __init__(
+        self,
+        uri: str,
+        *,
+        create_protocol: Callable[..., WebSocketClientProtocol] | None = None,
+        logger: LoggerLike | None = None,
+        compression: str | None = "deflate",
+        origin: Origin | None = None,
+        extensions: Sequence[ClientExtensionFactory] | None = None,
+        subprotocols: Sequence[Subprotocol] | None = None,
+        extra_headers: HeadersLike | None = None,
+        user_agent_header: str | None = USER_AGENT,
+        open_timeout: float | None = 10,
+        ping_interval: float | None = 20,
+        ping_timeout: float | None = 20,
+        close_timeout: float | None = None,
+        max_size: int | None = 2**20,
+        max_queue: int | None = 2**5,
+        read_limit: int = 2**16,
+        write_limit: int = 2**16,
+        **kwargs: Any,
+    ) -> None:
+        # Backwards compatibility: close_timeout used to be called timeout.
+        timeout: float | None = kwargs.pop("timeout", None)
+        if timeout is None:
+            timeout = 10
+        else:
+            warnings.warn("rename timeout to close_timeout", DeprecationWarning)
+        # If both are specified, timeout is ignored.
+        if close_timeout is None:
+            close_timeout = timeout
+
+        # Backwards compatibility: create_protocol used to be called klass.
+        klass: type[WebSocketClientProtocol] | None = kwargs.pop("klass", None)
+        if klass is None:
+            klass = WebSocketClientProtocol
+        else:
+            warnings.warn("rename klass to create_protocol", DeprecationWarning)
+        # If both are specified, klass is ignored.
+        if create_protocol is None:
+            create_protocol = klass
+
+        # Backwards compatibility: recv() used to return None on closed connections.
+        legacy_recv: bool = kwargs.pop("legacy_recv", False)
+
+        # Backwards compatibility: the loop parameter used to be supported.
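+        # For illustration, code written against very old releases may still
+        # pass an explicit event loop:
+        #     connect(uri, loop=asyncio.get_event_loop())
+        # The shim below pops ``loop`` and emits a DeprecationWarning.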
+ _loop: asyncio.AbstractEventLoop | None = kwargs.pop("loop", None) + if _loop is None: + loop = asyncio.get_event_loop() + else: + loop = _loop + warnings.warn("remove loop argument", DeprecationWarning) + + wsuri = parse_uri(uri) + if wsuri.secure: + kwargs.setdefault("ssl", True) + elif kwargs.get("ssl") is not None: + raise ValueError( + "connect() received a ssl argument for a ws:// URI, " + "use a wss:// URI to enable TLS" + ) + + if compression == "deflate": + extensions = enable_client_permessage_deflate(extensions) + elif compression is not None: + raise ValueError(f"unsupported compression: {compression}") + + if subprotocols is not None: + validate_subprotocols(subprotocols) + + # Help mypy and avoid this error: "type[WebSocketClientProtocol] | + # Callable[..., WebSocketClientProtocol]" not callable [misc] + create_protocol = cast(Callable[..., WebSocketClientProtocol], create_protocol) + factory = functools.partial( + create_protocol, + logger=logger, + origin=origin, + extensions=extensions, + subprotocols=subprotocols, + extra_headers=extra_headers, + user_agent_header=user_agent_header, + ping_interval=ping_interval, + ping_timeout=ping_timeout, + close_timeout=close_timeout, + max_size=max_size, + max_queue=max_queue, + read_limit=read_limit, + write_limit=write_limit, + host=wsuri.host, + port=wsuri.port, + secure=wsuri.secure, + legacy_recv=legacy_recv, + loop=_loop, + ) + + if kwargs.pop("unix", False): + path: str | None = kwargs.pop("path", None) + create_connection = functools.partial( + loop.create_unix_connection, factory, path, **kwargs + ) + else: + host: str | None + port: int | None + if kwargs.get("sock") is None: + host, port = wsuri.host, wsuri.port + else: + # If sock is given, host and port shouldn't be specified. + host, port = None, None + if kwargs.get("ssl"): + kwargs.setdefault("server_hostname", wsuri.host) + # If host and port are given, override values from the URI. + host = kwargs.pop("host", host) + port = kwargs.pop("port", port) + create_connection = functools.partial( + loop.create_connection, factory, host, port, **kwargs + ) + + self.open_timeout = open_timeout + if logger is None: + logger = logging.getLogger("websockets.client") + self.logger = logger + + # This is a coroutine function. + self._create_connection = create_connection + self._uri = uri + self._wsuri = wsuri + + def handle_redirect(self, uri: str) -> None: + # Update the state of this instance to connect to a new URI. + old_uri = self._uri + old_wsuri = self._wsuri + new_uri = urllib.parse.urljoin(old_uri, uri) + new_wsuri = parse_uri(new_uri) + + # Forbid TLS downgrade. + if old_wsuri.secure and not new_wsuri.secure: + raise SecurityError("redirect from WSS to WS") + + same_origin = ( + old_wsuri.secure == new_wsuri.secure + and old_wsuri.host == new_wsuri.host + and old_wsuri.port == new_wsuri.port + ) + + # Rewrite secure, host, and port for cross-origin redirects. + # This preserves connection overrides with the host and port + # arguments if the redirect points to the same host and port. + if not same_origin: + factory = self._create_connection.args[0] + # Support TLS upgrade. + if not old_wsuri.secure and new_wsuri.secure: + factory.keywords["secure"] = True + self._create_connection.keywords.setdefault("ssl", True) + # Replace secure, host, and port arguments of the protocol factory. 
+ factory = functools.partial( + factory.func, + *factory.args, + **dict(factory.keywords, host=new_wsuri.host, port=new_wsuri.port), + ) + # Replace secure, host, and port arguments of create_connection. + self._create_connection = functools.partial( + self._create_connection.func, + *(factory, new_wsuri.host, new_wsuri.port), + **self._create_connection.keywords, + ) + + # Set the new WebSocket URI. This suffices for same-origin redirects. + self._uri = new_uri + self._wsuri = new_wsuri + + # async for ... in connect(...): + + BACKOFF_INITIAL = float(os.environ.get("WEBSOCKETS_BACKOFF_INITIAL_DELAY", "5")) + BACKOFF_MIN = float(os.environ.get("WEBSOCKETS_BACKOFF_MIN_DELAY", "3.1")) + BACKOFF_MAX = float(os.environ.get("WEBSOCKETS_BACKOFF_MAX_DELAY", "90.0")) + BACKOFF_FACTOR = float(os.environ.get("WEBSOCKETS_BACKOFF_FACTOR", "1.618")) + + async def __aiter__(self) -> AsyncIterator[WebSocketClientProtocol]: + backoff_delay = self.BACKOFF_MIN / self.BACKOFF_FACTOR + while True: + try: + async with self as protocol: + yield protocol + except Exception: + # Add a random initial delay between 0 and 5 seconds. + # See 7.2.3. Recovering from Abnormal Closure in RFC 6455. + if backoff_delay == self.BACKOFF_MIN: + initial_delay = random.random() * self.BACKOFF_INITIAL + self.logger.info( + "! connect failed; reconnecting in %.1f seconds", + initial_delay, + exc_info=True, + ) + await asyncio.sleep(initial_delay) + else: + self.logger.info( + "! connect failed again; retrying in %d seconds", + int(backoff_delay), + exc_info=True, + ) + await asyncio.sleep(int(backoff_delay)) + # Increase delay with truncated exponential backoff. + backoff_delay = backoff_delay * self.BACKOFF_FACTOR + backoff_delay = min(backoff_delay, self.BACKOFF_MAX) + continue + else: + # Connection succeeded - reset backoff delay + backoff_delay = self.BACKOFF_MIN + + # async with connect(...) as ...: + + async def __aenter__(self) -> WebSocketClientProtocol: + return await self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + await self.protocol.close() + + # ... = await connect(...) + + def __await__(self) -> Generator[Any, None, WebSocketClientProtocol]: + # Create a suitable iterator by calling __await__ on a coroutine. + return self.__await_impl__().__await__() + + async def __await_impl__(self) -> WebSocketClientProtocol: + async with asyncio_timeout(self.open_timeout): + for _redirects in range(self.MAX_REDIRECTS_ALLOWED): + _transport, protocol = await self._create_connection() + try: + await protocol.handshake( + self._wsuri, + origin=protocol.origin, + available_extensions=protocol.available_extensions, + available_subprotocols=protocol.available_subprotocols, + extra_headers=protocol.extra_headers, + ) + except RedirectHandshake as exc: + protocol.fail_connection() + await protocol.wait_closed() + self.handle_redirect(exc.uri) + # Avoid leaking a connected socket when the handshake fails. + except (Exception, asyncio.CancelledError): + protocol.fail_connection() + await protocol.wait_closed() + raise + else: + self.protocol = protocol + return protocol + else: + raise SecurityError("too many redirects") + + # ... = yield from connect(...) - remove when dropping Python < 3.10 + + __iter__ = __await__ + + +connect = Connect + + +def unix_connect( + path: str | None = None, + uri: str = "ws://localhost/", + **kwargs: Any, +) -> Connect: + """ + Similar to :func:`connect`, but for connecting to a Unix socket. 
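+
+    For example (an illustrative sketch; the socket path is hypothetical)::
+
+        async with unix_connect("/var/run/app.sock") as websocket:
+            await websocket.send("Hello!")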
+ + This function builds upon the event loop's + :meth:`~asyncio.loop.create_unix_connection` method. + + It is only available on Unix. + + It's mainly useful for debugging servers listening on Unix sockets. + + Args: + path: File system path to the Unix socket. + uri: URI of the WebSocket server; the host is used in the TLS + handshake for secure connections and in the ``Host`` header. + + """ + return connect(uri=uri, path=path, unix=True, **kwargs) diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/exceptions.py b/hackaton/lib/python3.12/site-packages/websockets/legacy/exceptions.py new file mode 100644 index 0000000..9ca9b7a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/legacy/exceptions.py @@ -0,0 +1,78 @@ +import http + +from .. import datastructures +from ..exceptions import ( + InvalidHandshake, + ProtocolError as WebSocketProtocolError, # noqa: F401 +) +from ..typing import StatusLike + + +class InvalidMessage(InvalidHandshake): + """ + Raised when a handshake request or response is malformed. + + """ + + +class InvalidStatusCode(InvalidHandshake): + """ + Raised when a handshake response status code is invalid. + + """ + + def __init__(self, status_code: int, headers: datastructures.Headers) -> None: + self.status_code = status_code + self.headers = headers + + def __str__(self) -> str: + return f"server rejected WebSocket connection: HTTP {self.status_code}" + + +class AbortHandshake(InvalidHandshake): + """ + Raised to abort the handshake on purpose and return an HTTP response. + + This exception is an implementation detail. + + The public API is + :meth:`~websockets.legacy.server.WebSocketServerProtocol.process_request`. + + Attributes: + status (~http.HTTPStatus): HTTP status code. + headers (Headers): HTTP response headers. + body (bytes): HTTP response body. + """ + + def __init__( + self, + status: StatusLike, + headers: datastructures.HeadersLike, + body: bytes = b"", + ) -> None: + # If a user passes an int instead of a HTTPStatus, fix it automatically. + self.status = http.HTTPStatus(status) + self.headers = datastructures.Headers(headers) + self.body = body + + def __str__(self) -> str: + return ( + f"HTTP {self.status:d}, " + f"{len(self.headers)} headers, " + f"{len(self.body)} bytes" + ) + + +class RedirectHandshake(InvalidHandshake): + """ + Raised when a handshake gets redirected. + + This exception is an implementation detail. + + """ + + def __init__(self, uri: str) -> None: + self.uri = uri + + def __str__(self) -> str: + return f"redirect to {self.uri}" diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/framing.py b/hackaton/lib/python3.12/site-packages/websockets/legacy/framing.py new file mode 100644 index 0000000..4c2f8c2 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/legacy/framing.py @@ -0,0 +1,224 @@ +from __future__ import annotations + +import struct +from typing import Any, Awaitable, Callable, NamedTuple, Sequence + +from .. 
import extensions, frames
+from ..exceptions import PayloadTooBig, ProtocolError
+from ..frames import BytesLike
+from ..typing import Data
+
+
+try:
+    from ..speedups import apply_mask
+except ImportError:
+    from ..utils import apply_mask
+
+
+class Frame(NamedTuple):
+    fin: bool
+    opcode: frames.Opcode
+    data: bytes
+    rsv1: bool = False
+    rsv2: bool = False
+    rsv3: bool = False
+
+    @property
+    def new_frame(self) -> frames.Frame:
+        return frames.Frame(
+            self.opcode,
+            self.data,
+            self.fin,
+            self.rsv1,
+            self.rsv2,
+            self.rsv3,
+        )
+
+    def __str__(self) -> str:
+        return str(self.new_frame)
+
+    def check(self) -> None:
+        return self.new_frame.check()
+
+    @classmethod
+    async def read(
+        cls,
+        reader: Callable[[int], Awaitable[bytes]],
+        *,
+        mask: bool,
+        max_size: int | None = None,
+        extensions: Sequence[extensions.Extension] | None = None,
+    ) -> Frame:
+        """
+        Read a WebSocket frame.
+
+        Args:
+            reader: Coroutine that reads exactly the requested number of
+                bytes, unless the end of file is reached.
+            mask: Whether the frame should be masked, i.e. whether the read
+                happens on the server side.
+            max_size: Maximum payload size in bytes.
+            extensions: List of extensions, applied in reverse order.
+
+        Raises:
+            PayloadTooBig: If the frame exceeds ``max_size``.
+            ProtocolError: If the frame contains incorrect values.
+
+        """
+
+        # Read the header.
+        data = await reader(2)
+        head1, head2 = struct.unpack("!BB", data)
+
+        # While not Pythonic, this is marginally faster than calling bool().
+        fin = True if head1 & 0b10000000 else False
+        rsv1 = True if head1 & 0b01000000 else False
+        rsv2 = True if head1 & 0b00100000 else False
+        rsv3 = True if head1 & 0b00010000 else False
+
+        try:
+            opcode = frames.Opcode(head1 & 0b00001111)
+        except ValueError as exc:
+            raise ProtocolError("invalid opcode") from exc
+
+        if (True if head2 & 0b10000000 else False) != mask:
+            raise ProtocolError("incorrect masking")
+
+        length = head2 & 0b01111111
+        if length == 126:
+            data = await reader(2)
+            (length,) = struct.unpack("!H", data)
+        elif length == 127:
+            data = await reader(8)
+            (length,) = struct.unpack("!Q", data)
+        if max_size is not None and length > max_size:
+            raise PayloadTooBig(f"over size limit ({length} > {max_size} bytes)")
+        if mask:
+            mask_bits = await reader(4)
+
+        # Read the data.
+        data = await reader(length)
+        if mask:
+            data = apply_mask(data, mask_bits)
+
+        new_frame = frames.Frame(opcode, data, fin, rsv1, rsv2, rsv3)
+
+        if extensions is None:
+            extensions = []
+        for extension in reversed(extensions):
+            new_frame = extension.decode(new_frame, max_size=max_size)
+
+        new_frame.check()
+
+        return cls(
+            new_frame.fin,
+            new_frame.opcode,
+            new_frame.data,
+            new_frame.rsv1,
+            new_frame.rsv2,
+            new_frame.rsv3,
+        )
+
+    def write(
+        self,
+        write: Callable[[bytes], Any],
+        *,
+        mask: bool,
+        extensions: Sequence[extensions.Extension] | None = None,
+    ) -> None:
+        """
+        Write a WebSocket frame.
+
+        Args:
+            write: Function that writes bytes.
+            mask: Whether the frame should be masked, i.e. whether the write
+                happens on the client side.
+            extensions: List of extensions, applied in order.
+
+        Raises:
+            ProtocolError: If the frame contains incorrect values.
+
+        """
+        # The frame is written in a single call to write in order to prevent
+        # TCP fragmentation. See #68 for details. This also makes it safe to
+        # send frames concurrently from multiple coroutines.
+ write(self.new_frame.serialize(mask=mask, extensions=extensions)) + + +def prepare_data(data: Data) -> tuple[int, bytes]: + """ + Convert a string or byte-like object to an opcode and a bytes-like object. + + This function is designed for data frames. + + If ``data`` is a :class:`str`, return ``OP_TEXT`` and a :class:`bytes` + object encoding ``data`` in UTF-8. + + If ``data`` is a bytes-like object, return ``OP_BINARY`` and a bytes-like + object. + + Raises: + TypeError: If ``data`` doesn't have a supported type. + + """ + if isinstance(data, str): + return frames.Opcode.TEXT, data.encode() + elif isinstance(data, BytesLike): + return frames.Opcode.BINARY, data + else: + raise TypeError("data must be str or bytes-like") + + +def prepare_ctrl(data: Data) -> bytes: + """ + Convert a string or byte-like object to bytes. + + This function is designed for ping and pong frames. + + If ``data`` is a :class:`str`, return a :class:`bytes` object encoding + ``data`` in UTF-8. + + If ``data`` is a bytes-like object, return a :class:`bytes` object. + + Raises: + TypeError: If ``data`` doesn't have a supported type. + + """ + if isinstance(data, str): + return data.encode() + elif isinstance(data, BytesLike): + return bytes(data) + else: + raise TypeError("data must be str or bytes-like") + + +# Backwards compatibility with previously documented public APIs +encode_data = prepare_ctrl + +# Backwards compatibility with previously documented public APIs +from ..frames import Close # noqa: E402 F401, I001 + + +def parse_close(data: bytes) -> tuple[int, str]: + """ + Parse the payload from a close frame. + + Returns: + Close code and reason. + + Raises: + ProtocolError: If data is ill-formed. + UnicodeDecodeError: If the reason isn't valid UTF-8. + + """ + close = Close.parse(data) + return close.code, close.reason + + +def serialize_close(code: int, reason: str) -> bytes: + """ + Serialize the payload for a close frame. + + """ + return Close(code, reason).serialize() diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/handshake.py b/hackaton/lib/python3.12/site-packages/websockets/legacy/handshake.py new file mode 100644 index 0000000..6a7157c --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/legacy/handshake.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +import base64 +import binascii + +from ..datastructures import Headers, MultipleValuesError +from ..exceptions import InvalidHeader, InvalidHeaderValue, InvalidUpgrade +from ..headers import parse_connection, parse_upgrade +from ..typing import ConnectionOption, UpgradeProtocol +from ..utils import accept_key as accept, generate_key + + +__all__ = ["build_request", "check_request", "build_response", "check_response"] + + +def build_request(headers: Headers) -> str: + """ + Build a handshake request to send to the server. + + Update request headers passed in argument. + + Args: + headers: Handshake request headers. + + Returns: + ``key`` that must be passed to :func:`check_response`. + + """ + key = generate_key() + headers["Upgrade"] = "websocket" + headers["Connection"] = "Upgrade" + headers["Sec-WebSocket-Key"] = key + headers["Sec-WebSocket-Version"] = "13" + return key + + +def check_request(headers: Headers) -> str: + """ + Check a handshake request received from the client. + + This function doesn't verify that the request is an HTTP/1.1 or higher GET + request and doesn't perform ``Host`` and ``Origin`` checks. These controls + are usually performed earlier in the HTTP request handling code. 
They're + the responsibility of the caller. + + Args: + headers: Handshake request headers. + + Returns: + ``key`` that must be passed to :func:`build_response`. + + Raises: + InvalidHandshake: If the handshake request is invalid. + Then, the server must return a 400 Bad Request error. + + """ + connection: list[ConnectionOption] = sum( + [parse_connection(value) for value in headers.get_all("Connection")], [] + ) + + if not any(value.lower() == "upgrade" for value in connection): + raise InvalidUpgrade("Connection", ", ".join(connection)) + + upgrade: list[UpgradeProtocol] = sum( + [parse_upgrade(value) for value in headers.get_all("Upgrade")], [] + ) + + # For compatibility with non-strict implementations, ignore case when + # checking the Upgrade header. The RFC always uses "websocket", except + # in section 11.2. (IANA registration) where it uses "WebSocket". + if not (len(upgrade) == 1 and upgrade[0].lower() == "websocket"): + raise InvalidUpgrade("Upgrade", ", ".join(upgrade)) + + try: + s_w_key = headers["Sec-WebSocket-Key"] + except KeyError as exc: + raise InvalidHeader("Sec-WebSocket-Key") from exc + except MultipleValuesError as exc: + raise InvalidHeader("Sec-WebSocket-Key", "multiple values") from exc + + try: + raw_key = base64.b64decode(s_w_key.encode(), validate=True) + except binascii.Error as exc: + raise InvalidHeaderValue("Sec-WebSocket-Key", s_w_key) from exc + if len(raw_key) != 16: + raise InvalidHeaderValue("Sec-WebSocket-Key", s_w_key) + + try: + s_w_version = headers["Sec-WebSocket-Version"] + except KeyError as exc: + raise InvalidHeader("Sec-WebSocket-Version") from exc + except MultipleValuesError as exc: + raise InvalidHeader("Sec-WebSocket-Version", "multiple values") from exc + + if s_w_version != "13": + raise InvalidHeaderValue("Sec-WebSocket-Version", s_w_version) + + return s_w_key + + +def build_response(headers: Headers, key: str) -> None: + """ + Build a handshake response to send to the client. + + Update response headers passed in argument. + + Args: + headers: Handshake response headers. + key: Returned by :func:`check_request`. + + """ + headers["Upgrade"] = "websocket" + headers["Connection"] = "Upgrade" + headers["Sec-WebSocket-Accept"] = accept(key) + + +def check_response(headers: Headers, key: str) -> None: + """ + Check a handshake response received from the server. + + This function doesn't verify that the response is an HTTP/1.1 or higher + response with a 101 status code. These controls are the responsibility of + the caller. + + Args: + headers: Handshake response headers. + key: Returned by :func:`build_request`. + + Raises: + InvalidHandshake: If the handshake response is invalid. + + """ + connection: list[ConnectionOption] = sum( + [parse_connection(value) for value in headers.get_all("Connection")], [] + ) + + if not any(value.lower() == "upgrade" for value in connection): + raise InvalidUpgrade("Connection", " ".join(connection)) + + upgrade: list[UpgradeProtocol] = sum( + [parse_upgrade(value) for value in headers.get_all("Upgrade")], [] + ) + + # For compatibility with non-strict implementations, ignore case when + # checking the Upgrade header. The RFC always uses "websocket", except + # in section 11.2. (IANA registration) where it uses "WebSocket". 
+ if not (len(upgrade) == 1 and upgrade[0].lower() == "websocket"): + raise InvalidUpgrade("Upgrade", ", ".join(upgrade)) + + try: + s_w_accept = headers["Sec-WebSocket-Accept"] + except KeyError as exc: + raise InvalidHeader("Sec-WebSocket-Accept") from exc + except MultipleValuesError as exc: + raise InvalidHeader("Sec-WebSocket-Accept", "multiple values") from exc + + if s_w_accept != accept(key): + raise InvalidHeaderValue("Sec-WebSocket-Accept", s_w_accept) diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/http.py b/hackaton/lib/python3.12/site-packages/websockets/legacy/http.py new file mode 100644 index 0000000..a7c8a92 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/legacy/http.py @@ -0,0 +1,201 @@ +from __future__ import annotations + +import asyncio +import os +import re + +from ..datastructures import Headers +from ..exceptions import SecurityError + + +__all__ = ["read_request", "read_response"] + +MAX_NUM_HEADERS = int(os.environ.get("WEBSOCKETS_MAX_NUM_HEADERS", "128")) +MAX_LINE_LENGTH = int(os.environ.get("WEBSOCKETS_MAX_LINE_LENGTH", "8192")) + + +def d(value: bytes) -> str: + """ + Decode a bytestring for interpolating into an error message. + + """ + return value.decode(errors="backslashreplace") + + +# See https://datatracker.ietf.org/doc/html/rfc7230#appendix-B. + +# Regex for validating header names. + +_token_re = re.compile(rb"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+") + +# Regex for validating header values. + +# We don't attempt to support obsolete line folding. + +# Include HTAB (\x09), SP (\x20), VCHAR (\x21-\x7e), obs-text (\x80-\xff). + +# The ABNF is complicated because it attempts to express that optional +# whitespace is ignored. We strip whitespace and don't revalidate that. + +# See also https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 + +_value_re = re.compile(rb"[\x09\x20-\x7e\x80-\xff]*") + + +async def read_request(stream: asyncio.StreamReader) -> tuple[str, Headers]: + """ + Read an HTTP/1.1 GET request and return ``(path, headers)``. + + ``path`` isn't URL-decoded or validated in any way. + + ``path`` and ``headers`` are expected to contain only ASCII characters. + Other characters are represented with surrogate escapes. + + :func:`read_request` doesn't attempt to read the request body because + WebSocket handshake requests don't have one. If the request contains a + body, it may be read from ``stream`` after this coroutine returns. + + Args: + stream: Input to read the request from. + + Raises: + EOFError: If the connection is closed without a full HTTP request. + SecurityError: If the request exceeds a security limit. + ValueError: If the request isn't well formatted. + + """ + # https://datatracker.ietf.org/doc/html/rfc7230#section-3.1.1 + + # Parsing is simple because fixed values are expected for method and + # version and because path isn't checked. Since WebSocket software tends + # to implement HTTP/1.1 strictly, there's little need for lenient parsing. 
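+    # For illustration, a well-formed request line is b"GET /chat HTTP/1.1";
+    # any other method or version is rejected below.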
+
+    try:
+        request_line = await read_line(stream)
+    except EOFError as exc:
+        raise EOFError("connection closed while reading HTTP request line") from exc
+
+    try:
+        method, raw_path, version = request_line.split(b" ", 2)
+    except ValueError:  # not enough values to unpack (expected 3, got 1-2)
+        raise ValueError(f"invalid HTTP request line: {d(request_line)}") from None
+
+    if method != b"GET":
+        raise ValueError(f"unsupported HTTP method: {d(method)}")
+    if version != b"HTTP/1.1":
+        raise ValueError(f"unsupported HTTP version: {d(version)}")
+    path = raw_path.decode("ascii", "surrogateescape")
+
+    headers = await read_headers(stream)
+
+    return path, headers
+
+
+async def read_response(stream: asyncio.StreamReader) -> tuple[int, str, Headers]:
+    """
+    Read an HTTP/1.1 response and return ``(status_code, reason, headers)``.
+
+    ``reason`` and ``headers`` are expected to contain only ASCII characters.
+    Other characters are represented with surrogate escapes.
+
+    :func:`read_response` doesn't attempt to read the response body because
+    WebSocket handshake responses don't have one. If the response contains a
+    body, it may be read from ``stream`` after this coroutine returns.
+
+    Args:
+        stream: Input to read the response from.
+
+    Raises:
+        EOFError: If the connection is closed without a full HTTP response.
+        SecurityError: If the response exceeds a security limit.
+        ValueError: If the response isn't well formatted.
+
+    """
+    # https://datatracker.ietf.org/doc/html/rfc7230#section-3.1.2
+
+    # As in read_request, parsing is simple because a fixed value is expected
+    # for version, status_code is a 3-digit number, and reason can be ignored.
+
+    try:
+        status_line = await read_line(stream)
+    except EOFError as exc:
+        raise EOFError("connection closed while reading HTTP status line") from exc
+
+    try:
+        version, raw_status_code, raw_reason = status_line.split(b" ", 2)
+    except ValueError:  # not enough values to unpack (expected 3, got 1-2)
+        raise ValueError(f"invalid HTTP status line: {d(status_line)}") from None
+
+    if version != b"HTTP/1.1":
+        raise ValueError(f"unsupported HTTP version: {d(version)}")
+    try:
+        status_code = int(raw_status_code)
+    except ValueError:  # invalid literal for int() with base 10
+        raise ValueError(f"invalid HTTP status code: {d(raw_status_code)}") from None
+    if not 100 <= status_code < 1000:
+        raise ValueError(f"unsupported HTTP status code: {d(raw_status_code)}")
+    if not _value_re.fullmatch(raw_reason):
+        raise ValueError(f"invalid HTTP reason phrase: {d(raw_reason)}")
+    reason = raw_reason.decode()
+
+    headers = await read_headers(stream)
+
+    return status_code, reason, headers
+
+
+async def read_headers(stream: asyncio.StreamReader) -> Headers:
+    """
+    Read HTTP headers from ``stream``.
+
+    Non-ASCII characters are represented with surrogate escapes.
+
+    """
+    # https://datatracker.ietf.org/doc/html/rfc7230#section-3.2
+
+    # We don't attempt to support obsolete line folding.
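+    # For illustration, b"Sec-WebSocket-Version: 13" splits into the name
+    # b"Sec-WebSocket-Version" and the value b"13"; read_line() has already
+    # stripped the trailing CRLF.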
+ + headers = Headers() + for _ in range(MAX_NUM_HEADERS + 1): + try: + line = await read_line(stream) + except EOFError as exc: + raise EOFError("connection closed while reading HTTP headers") from exc + if line == b"": + break + + try: + raw_name, raw_value = line.split(b":", 1) + except ValueError: # not enough values to unpack (expected 2, got 1) + raise ValueError(f"invalid HTTP header line: {d(line)}") from None + if not _token_re.fullmatch(raw_name): + raise ValueError(f"invalid HTTP header name: {d(raw_name)}") + raw_value = raw_value.strip(b" \t") + if not _value_re.fullmatch(raw_value): + raise ValueError(f"invalid HTTP header value: {d(raw_value)}") + + name = raw_name.decode("ascii") # guaranteed to be ASCII at this point + value = raw_value.decode("ascii", "surrogateescape") + headers[name] = value + + else: + raise SecurityError("too many HTTP headers") + + return headers + + +async def read_line(stream: asyncio.StreamReader) -> bytes: + """ + Read a single line from ``stream``. + + CRLF is stripped from the return value. + + """ + # Security: this is bounded by the StreamReader's limit (default = 32 KiB). + line = await stream.readline() + # Security: this guarantees header values are small (hard-coded = 8 KiB) + if len(line) > MAX_LINE_LENGTH: + raise SecurityError("line too long") + # Not mandatory but safe - https://datatracker.ietf.org/doc/html/rfc7230#section-3.5 + if not line.endswith(b"\r\n"): + raise EOFError("line without CRLF") + return line[:-2] diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/protocol.py b/hackaton/lib/python3.12/site-packages/websockets/legacy/protocol.py new file mode 100644 index 0000000..998e390 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/legacy/protocol.py @@ -0,0 +1,1645 @@ +from __future__ import annotations + +import asyncio +import codecs +import collections +import logging +import random +import ssl +import struct +import sys +import time +import uuid +import warnings +from typing import ( + Any, + AsyncIterable, + AsyncIterator, + Awaitable, + Callable, + Deque, + Iterable, + Mapping, + cast, +) + +from ..asyncio.compatibility import asyncio_timeout +from ..datastructures import Headers +from ..exceptions import ( + ConnectionClosed, + ConnectionClosedError, + ConnectionClosedOK, + InvalidState, + PayloadTooBig, + ProtocolError, +) +from ..extensions import Extension +from ..frames import ( + OK_CLOSE_CODES, + OP_BINARY, + OP_CLOSE, + OP_CONT, + OP_PING, + OP_PONG, + OP_TEXT, + Close, + CloseCode, + Opcode, +) +from ..protocol import State +from ..typing import Data, LoggerLike, Subprotocol +from .framing import Frame, prepare_ctrl, prepare_data + + +__all__ = ["WebSocketCommonProtocol"] + + +# In order to ensure consistency, the code always checks the current value of +# WebSocketCommonProtocol.state before assigning a new value and never yields +# between the check and the assignment. + + +class WebSocketCommonProtocol(asyncio.Protocol): + """ + WebSocket connection. + + :class:`WebSocketCommonProtocol` provides APIs shared between WebSocket + servers and clients. You shouldn't use it directly. Instead, use + :class:`~websockets.legacy.client.WebSocketClientProtocol` or + :class:`~websockets.legacy.server.WebSocketServerProtocol`. + + This documentation focuses on low-level details that aren't covered in the + documentation of :class:`~websockets.legacy.client.WebSocketClientProtocol` + and :class:`~websockets.legacy.server.WebSocketServerProtocol` for the sake + of simplicity. 
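+
+    Typical applications use the higher-level APIs instead. For example (an
+    illustrative sketch; the URI is hypothetical)::
+
+        async with connect("ws://localhost:8765") as websocket:
+            await websocket.send("ping")
+            reply = await websocket.recv()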
+
+    Once the connection is open, a Ping_ frame is sent every ``ping_interval``
+    seconds. This serves as a keepalive. It helps keep the connection open,
+    especially in the presence of proxies with short timeouts on inactive
+    connections. Set ``ping_interval`` to :obj:`None` to disable this behavior.
+
+    .. _Ping: https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.2
+
+    If the corresponding Pong_ frame isn't received within ``ping_timeout``
+    seconds, the connection is considered unusable and is closed with code 1011.
+    This ensures that the remote endpoint remains responsive. Set
+    ``ping_timeout`` to :obj:`None` to disable this behavior.
+
+    .. _Pong: https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.3
+
+    See the discussion of :doc:`keepalive <../../topics/keepalive>` for details.
+
+    The ``close_timeout`` parameter defines a maximum wait time for completing
+    the closing handshake and terminating the TCP connection. For legacy
+    reasons, :meth:`close` completes in at most ``5 * close_timeout`` seconds
+    for clients and ``4 * close_timeout`` for servers.
+
+    ``close_timeout`` is a parameter of the protocol because websockets usually
+    calls :meth:`close` implicitly upon exit:
+
+    * on the client side, when using :func:`~websockets.legacy.client.connect`
+      as a context manager;
+    * on the server side, when the connection handler terminates.
+
+    To apply a timeout to any other API, wrap it in :func:`~asyncio.timeout` or
+    :func:`~asyncio.wait_for`.
+
+    The ``max_size`` parameter enforces the maximum size for incoming messages
+    in bytes. The default value is 1 MiB. If a larger message is received,
+    :meth:`recv` will raise :exc:`~websockets.exceptions.ConnectionClosedError`
+    and the connection will be closed with code 1009.
+
+    The ``max_queue`` parameter sets the maximum length of the queue that
+    holds incoming messages. The default value is ``32``. Messages are added
+    to an in-memory queue when they're received; then :meth:`recv` pops from
+    that queue. In order to prevent excessive memory consumption when
+    messages are received faster than they can be processed, the queue must
+    be bounded. If the queue fills up, the protocol stops processing incoming
+    data until :meth:`recv` is called. In this situation, various receive
+    buffers (at least in :mod:`asyncio` and in the OS) will fill up, then the
+    TCP receive window will shrink, slowing down transmission to avoid packet
+    loss.
+
+    Since Python can use up to 4 bytes of memory to represent a single
+    character, each connection may use up to ``4 * max_size * max_queue``
+    bytes of memory to store incoming messages. By default, this is 128 MiB.
+    You may want to lower the limits, depending on your application's
+    requirements.
+
+    The ``read_limit`` argument sets the high-water limit of the buffer for
+    incoming bytes. The low-water limit is half the high-water limit. The
+    default value is 64 KiB, half of asyncio's default (based on the current
+    implementation of :class:`~asyncio.StreamReader`).
+
+    The ``write_limit`` argument sets the high-water limit of the buffer for
+    outgoing bytes. The low-water limit is a quarter of the high-water limit.
+    The default value is 64 KiB, equal to asyncio's default (based on the
+    current implementation of ``FlowControlMixin``).
+
+    See the discussion of :doc:`memory usage <../../topics/memory>` for details.
+
+    Args:
+        logger: Logger for this connection.
+            It defaults to ``logging.getLogger("websockets.protocol")``.
+ See the :doc:`logging guide <../../topics/logging>` for details. + ping_interval: Interval between keepalive pings in seconds. + :obj:`None` disables keepalive. + ping_timeout: Timeout for keepalive pings in seconds. + :obj:`None` disables timeouts. + close_timeout: Timeout for closing the connection in seconds. + For legacy reasons, the actual timeout is 4 or 5 times larger. + max_size: Maximum size of incoming messages in bytes. + :obj:`None` disables the limit. + max_queue: Maximum number of incoming messages in receive buffer. + :obj:`None` disables the limit. + read_limit: High-water mark of read buffer in bytes. + write_limit: High-water mark of write buffer in bytes. + + """ + + # There are only two differences between the client-side and server-side + # behavior: masking the payload and closing the underlying TCP connection. + # Set is_client = True/False and side = "client"/"server" to pick a side. + is_client: bool + side: str = "undefined" + + def __init__( + self, + *, + logger: LoggerLike | None = None, + ping_interval: float | None = 20, + ping_timeout: float | None = 20, + close_timeout: float | None = None, + max_size: int | None = 2**20, + max_queue: int | None = 2**5, + read_limit: int = 2**16, + write_limit: int = 2**16, + # The following arguments are kept only for backwards compatibility. + host: str | None = None, + port: int | None = None, + secure: bool | None = None, + legacy_recv: bool = False, + loop: asyncio.AbstractEventLoop | None = None, + timeout: float | None = None, + ) -> None: + if legacy_recv: # pragma: no cover + warnings.warn("legacy_recv is deprecated", DeprecationWarning) + + # Backwards compatibility: close_timeout used to be called timeout. + if timeout is None: + timeout = 10 + else: + warnings.warn("rename timeout to close_timeout", DeprecationWarning) + # If both are specified, timeout is ignored. + if close_timeout is None: + close_timeout = timeout + + # Backwards compatibility: the loop parameter used to be supported. + if loop is None: + loop = asyncio.get_event_loop() + else: + warnings.warn("remove loop argument", DeprecationWarning) + + self.ping_interval = ping_interval + self.ping_timeout = ping_timeout + self.close_timeout = close_timeout + self.max_size = max_size + self.max_queue = max_queue + self.read_limit = read_limit + self.write_limit = write_limit + + # Unique identifier. For logs. + self.id: uuid.UUID = uuid.uuid4() + """Unique identifier of the connection. Useful in logs.""" + + # Logger or LoggerAdapter for this connection. + if logger is None: + logger = logging.getLogger("websockets.protocol") + self.logger: LoggerLike = logging.LoggerAdapter(logger, {"websocket": self}) + """Logger for this connection.""" + + # Track if DEBUG is enabled. Shortcut logging calls if it isn't. + self.debug = logger.isEnabledFor(logging.DEBUG) + + self.loop = loop + + self._host = host + self._port = port + self._secure = secure + self.legacy_recv = legacy_recv + + # Configure read buffer limits. The high-water limit is defined by + # ``self.read_limit``. The ``limit`` argument controls the line length + # limit and half the buffer limit of :class:`~asyncio.StreamReader`. + # That's why it must be set to half of ``self.read_limit``. 
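+        # For example, with the default read_limit of 2**16 bytes (64 KiB),
+        # the StreamReader below is created with limit=2**15 (32 KiB).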
+        self.reader = asyncio.StreamReader(limit=read_limit // 2, loop=loop)
+
+        # Copied from asyncio.FlowControlMixin
+        self._paused = False
+        self._drain_waiter: asyncio.Future[None] | None = None
+
+        self._drain_lock = asyncio.Lock()
+
+        # This class implements the data transfer and closing handshake, which
+        # are shared between the client-side and the server-side.
+        # Subclasses implement the opening handshake and, on success, execute
+        # :meth:`connection_open` to change the state to OPEN.
+        self.state = State.CONNECTING
+        if self.debug:
+            self.logger.debug("= connection is CONNECTING")
+
+        # HTTP protocol parameters.
+        self.path: str
+        """Path of the opening handshake request."""
+        self.request_headers: Headers
+        """Opening handshake request headers."""
+        self.response_headers: Headers
+        """Opening handshake response headers."""
+
+        # WebSocket protocol parameters.
+        self.extensions: list[Extension] = []
+        self.subprotocol: Subprotocol | None = None
+        """Subprotocol, if one was negotiated."""
+
+        # Close code and reason, set when a close frame is sent or received.
+        self.close_rcvd: Close | None = None
+        self.close_sent: Close | None = None
+        self.close_rcvd_then_sent: bool | None = None
+
+        # Completed when the connection state becomes CLOSED. Translates the
+        # :meth:`connection_lost` callback to a :class:`~asyncio.Future`
+        # that can be awaited. (Other :class:`~asyncio.Protocol` callbacks are
+        # translated by ``self.reader``).
+        self.connection_lost_waiter: asyncio.Future[None] = loop.create_future()
+
+        # Queue of received messages.
+        self.messages: Deque[Data] = collections.deque()
+        self._pop_message_waiter: asyncio.Future[None] | None = None
+        self._put_message_waiter: asyncio.Future[None] | None = None
+
+        # Protect sending fragmented messages.
+        self._fragmented_message_waiter: asyncio.Future[None] | None = None
+
+        # Mapping of ping IDs to pong waiters, in chronological order.
+        self.pings: dict[bytes, tuple[asyncio.Future[float], float]] = {}
+
+        self.latency: float = 0
+        """
+        Latency of the connection, in seconds.
+
+        Latency is defined as the round-trip time of the connection. It is
+        measured by sending a Ping frame and waiting for a matching Pong frame.
+        Before the first measurement, :attr:`latency` is ``0``.
+
+        By default, websockets enables a :ref:`keepalive <keepalive>` mechanism
+        that sends Ping frames automatically at regular intervals. You can also
+        send Ping frames and measure latency with :meth:`ping`.
+        """
+
+        # Task running the data transfer.
+        self.transfer_data_task: asyncio.Task[None]
+
+        # Exception that occurred during data transfer, if any.
+        self.transfer_data_exc: BaseException | None = None
+
+        # Task sending keepalive pings.
+        self.keepalive_ping_task: asyncio.Task[None]
+
+        # Task closing the TCP connection.
+        self.close_connection_task: asyncio.Task[None]
+
+    # Copied from asyncio.FlowControlMixin
+    async def _drain_helper(self) -> None:  # pragma: no cover
+        if self.connection_lost_waiter.done():
+            raise ConnectionResetError("Connection lost")
+        if not self._paused:
+            return
+        waiter = self._drain_waiter
+        assert waiter is None or waiter.cancelled()
+        waiter = self.loop.create_future()
+        self._drain_waiter = waiter
+        await waiter
+
+    # Copied from asyncio.StreamWriter
+    async def _drain(self) -> None:  # pragma: no cover
+        if self.reader is not None:
+            exc = self.reader.exception()
+            if exc is not None:
+                raise exc
+        if self.transport is not None:
+            if self.transport.is_closing():
+                # Yield to the event loop so connection_lost() may be
+                # called.
Without this, _drain_helper() would return + # immediately, and code that calls + # write(...); yield from drain() + # in a loop would never call connection_lost(), so it + # would not see an error when the socket is closed. + await asyncio.sleep(0) + await self._drain_helper() + + def connection_open(self) -> None: + """ + Callback when the WebSocket opening handshake completes. + + Enter the OPEN state and start the data transfer phase. + + """ + # 4.1. The WebSocket Connection is Established. + assert self.state is State.CONNECTING + self.state = State.OPEN + if self.debug: + self.logger.debug("= connection is OPEN") + # Start the task that receives incoming WebSocket messages. + self.transfer_data_task = self.loop.create_task(self.transfer_data()) + # Start the task that sends pings at regular intervals. + self.keepalive_ping_task = self.loop.create_task(self.keepalive_ping()) + # Start the task that eventually closes the TCP connection. + self.close_connection_task = self.loop.create_task(self.close_connection()) + + @property + def host(self) -> str | None: + alternative = "remote_address" if self.is_client else "local_address" + warnings.warn(f"use {alternative}[0] instead of host", DeprecationWarning) + return self._host + + @property + def port(self) -> int | None: + alternative = "remote_address" if self.is_client else "local_address" + warnings.warn(f"use {alternative}[1] instead of port", DeprecationWarning) + return self._port + + @property + def secure(self) -> bool | None: + warnings.warn("don't use secure", DeprecationWarning) + return self._secure + + # Public API + + @property + def local_address(self) -> Any: + """ + Local address of the connection. + + For IPv4 connections, this is a ``(host, port)`` tuple. + + The format of the address depends on the address family; + see :meth:`~socket.socket.getsockname`. + + :obj:`None` if the TCP connection isn't established yet. + + """ + try: + transport = self.transport + except AttributeError: + return None + else: + return transport.get_extra_info("sockname") + + @property + def remote_address(self) -> Any: + """ + Remote address of the connection. + + For IPv4 connections, this is a ``(host, port)`` tuple. + + The format of the address depends on the address family; + see :meth:`~socket.socket.getpeername`. + + :obj:`None` if the TCP connection isn't established yet. + + """ + try: + transport = self.transport + except AttributeError: + return None + else: + return transport.get_extra_info("peername") + + @property + def open(self) -> bool: + """ + :obj:`True` when the connection is open; :obj:`False` otherwise. + + This attribute may be used to detect disconnections. However, this + approach is discouraged per the EAFP_ principle. Instead, you should + handle :exc:`~websockets.exceptions.ConnectionClosed` exceptions. + + .. _EAFP: https://docs.python.org/3/glossary.html#term-eafp + + """ + return self.state is State.OPEN and not self.transfer_data_task.done() + + @property + def closed(self) -> bool: + """ + :obj:`True` when the connection is closed; :obj:`False` otherwise. + + Be aware that both :attr:`open` and :attr:`closed` are :obj:`False` + during the opening and closing sequences. + + """ + return self.state is State.CLOSED + + @property + def close_code(self) -> int | None: + """ + WebSocket close code, defined in `section 7.1.5 of RFC 6455`_. + + .. _section 7.1.5 of RFC 6455: + https://datatracker.ietf.org/doc/html/rfc6455#section-7.1.5 + + :obj:`None` if the connection isn't closed yet. 
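+
+        For example, if the TCP connection drops without a proper closing
+        handshake, this is ``1006``, i.e.
+        :attr:`~websockets.frames.CloseCode.ABNORMAL_CLOSURE`.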
+ + """ + if self.state is not State.CLOSED: + return None + elif self.close_rcvd is None: + return CloseCode.ABNORMAL_CLOSURE + else: + return self.close_rcvd.code + + @property + def close_reason(self) -> str | None: + """ + WebSocket close reason, defined in `section 7.1.6 of RFC 6455`_. + + .. _section 7.1.6 of RFC 6455: + https://datatracker.ietf.org/doc/html/rfc6455#section-7.1.6 + + :obj:`None` if the connection isn't closed yet. + + """ + if self.state is not State.CLOSED: + return None + elif self.close_rcvd is None: + return "" + else: + return self.close_rcvd.reason + + async def __aiter__(self) -> AsyncIterator[Data]: + """ + Iterate on incoming messages. + + The iterator exits normally when the connection is closed with the close + code 1000 (OK) or 1001 (going away) or without a close code. + + It raises a :exc:`~websockets.exceptions.ConnectionClosedError` + exception when the connection is closed with any other code. + + """ + try: + while True: + yield await self.recv() + except ConnectionClosedOK: + return + + async def recv(self) -> Data: + """ + Receive the next message. + + When the connection is closed, :meth:`recv` raises + :exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it raises + :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal + connection closure and + :exc:`~websockets.exceptions.ConnectionClosedError` after a protocol + error or a network failure. This is how you detect the end of the + message stream. + + Canceling :meth:`recv` is safe. There's no risk of losing the next + message. The next invocation of :meth:`recv` will return it. + + This makes it possible to enforce a timeout by wrapping :meth:`recv` in + :func:`~asyncio.timeout` or :func:`~asyncio.wait_for`. + + Returns: + A string (:class:`str`) for a Text_ frame. A bytestring + (:class:`bytes`) for a Binary_ frame. + + .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + Raises: + ConnectionClosed: When the connection is closed. + RuntimeError: If two coroutines call :meth:`recv` concurrently. + + """ + if self._pop_message_waiter is not None: + raise RuntimeError( + "cannot call recv while another coroutine " + "is already waiting for the next message" + ) + + # Don't await self.ensure_open() here: + # - messages could be available in the queue even if the connection + # is closed; + # - messages could be received before the closing frame even if the + # connection is closing. + + # Wait until there's a message in the queue (if necessary) or the + # connection is closed. + while len(self.messages) <= 0: + pop_message_waiter: asyncio.Future[None] = self.loop.create_future() + self._pop_message_waiter = pop_message_waiter + try: + # If asyncio.wait() is canceled, it doesn't cancel + # pop_message_waiter and self.transfer_data_task. + await asyncio.wait( + [pop_message_waiter, self.transfer_data_task], + return_when=asyncio.FIRST_COMPLETED, + ) + finally: + self._pop_message_waiter = None + + # If asyncio.wait(...) exited because self.transfer_data_task + # completed before receiving a new message, raise a suitable + # exception (or return None if legacy_recv is enabled). + if not pop_message_waiter.done(): + if self.legacy_recv: + return None # type: ignore + else: + # Wait until the connection is closed to raise + # ConnectionClosed with the correct code and reason. + await self.ensure_open() + + # Pop a message from the queue. 
+ message = self.messages.popleft() + + # Notify transfer_data(). + if self._put_message_waiter is not None: + self._put_message_waiter.set_result(None) + self._put_message_waiter = None + + return message + + async def send( + self, + message: Data | Iterable[Data] | AsyncIterable[Data], + ) -> None: + """ + Send a message. + + A string (:class:`str`) is sent as a Text_ frame. A bytestring or + bytes-like object (:class:`bytes`, :class:`bytearray`, or + :class:`memoryview`) is sent as a Binary_ frame. + + .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + :meth:`send` also accepts an iterable or an asynchronous iterable of + strings, bytestrings, or bytes-like objects to enable fragmentation_. + Each item is treated as a message fragment and sent in its own frame. + All items must be of the same type, or else :meth:`send` will raise a + :exc:`TypeError` and the connection will be closed. + + .. _fragmentation: https://datatracker.ietf.org/doc/html/rfc6455#section-5.4 + + :meth:`send` rejects dict-like objects because this is often an error. + (If you want to send the keys of a dict-like object as fragments, call + its :meth:`~dict.keys` method and pass the result to :meth:`send`.) + + Canceling :meth:`send` is discouraged. Instead, you should close the + connection with :meth:`close`. Indeed, there are only two situations + where :meth:`send` may yield control to the event loop and then get + canceled; in both cases, :meth:`close` has the same effect and is + more clear: + + 1. The write buffer is full. If you don't want to wait until enough + data is sent, your only alternative is to close the connection. + :meth:`close` will likely time out then abort the TCP connection. + 2. ``message`` is an asynchronous iterator that yields control. + Stopping in the middle of a fragmented message will cause a + protocol error and the connection will be closed. + + When the connection is closed, :meth:`send` raises + :exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it + raises :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal + connection closure and + :exc:`~websockets.exceptions.ConnectionClosedError` after a protocol + error or a network failure. + + Args: + message: Message to send. + + Raises: + ConnectionClosed: When the connection is closed. + TypeError: If ``message`` doesn't have a supported type. + + """ + await self.ensure_open() + + # While sending a fragmented message, prevent sending other messages + # until all fragments are sent. + while self._fragmented_message_waiter is not None: + await asyncio.shield(self._fragmented_message_waiter) + + # Unfragmented message -- this case must be handled first because + # strings and bytes-like objects are iterable. + + if isinstance(message, (str, bytes, bytearray, memoryview)): + opcode, data = prepare_data(message) + await self.write_frame(True, opcode, data) + + # Catch a common mistake -- passing a dict to send(). + + elif isinstance(message, Mapping): + raise TypeError("data is a dict-like object") + + # Fragmented message -- regular iterator. + + elif isinstance(message, Iterable): + # Work around https://github.com/python/mypy/issues/6227 + message = cast(Iterable[Data], message) + + iter_message = iter(message) + try: + fragment = next(iter_message) + except StopIteration: + return + opcode, data = prepare_data(fragment) + + self._fragmented_message_waiter = self.loop.create_future() + try: + # First fragment. 
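+ # The opcode marks the message type (text or binary) on this first + # frame only; fin=False signals that more fragments will follow.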
+ await self.write_frame(False, opcode, data) + + # Other fragments. + for fragment in iter_message: + confirm_opcode, data = prepare_data(fragment) + if confirm_opcode != opcode: + raise TypeError("data contains inconsistent types") + await self.write_frame(False, OP_CONT, data) + + # Final fragment. + await self.write_frame(True, OP_CONT, b"") + + except (Exception, asyncio.CancelledError): + # We're half-way through a fragmented message and we can't + # complete it. This makes the connection unusable. + self.fail_connection(CloseCode.INTERNAL_ERROR) + raise + + finally: + self._fragmented_message_waiter.set_result(None) + self._fragmented_message_waiter = None + + # Fragmented message -- asynchronous iterator + + elif isinstance(message, AsyncIterable): + # Implement aiter_message = aiter(message) without aiter + # Work around https://github.com/python/mypy/issues/5738 + aiter_message = cast( + Callable[[AsyncIterable[Data]], AsyncIterator[Data]], + type(message).__aiter__, + )(message) + try: + # Implement fragment = anext(aiter_message) without anext + # Work around https://github.com/python/mypy/issues/5738 + fragment = await cast( + Callable[[AsyncIterator[Data]], Awaitable[Data]], + type(aiter_message).__anext__, + )(aiter_message) + except StopAsyncIteration: + return + opcode, data = prepare_data(fragment) + + self._fragmented_message_waiter = self.loop.create_future() + try: + # First fragment. + await self.write_frame(False, opcode, data) + + # Other fragments. + async for fragment in aiter_message: + confirm_opcode, data = prepare_data(fragment) + if confirm_opcode != opcode: + raise TypeError("data contains inconsistent types") + await self.write_frame(False, OP_CONT, data) + + # Final fragment. + await self.write_frame(True, OP_CONT, b"") + + except (Exception, asyncio.CancelledError): + # We're half-way through a fragmented message and we can't + # complete it. This makes the connection unusable. + self.fail_connection(CloseCode.INTERNAL_ERROR) + raise + + finally: + self._fragmented_message_waiter.set_result(None) + self._fragmented_message_waiter = None + + else: + raise TypeError("data must be str, bytes-like, or iterable") + + async def close( + self, + code: int = CloseCode.NORMAL_CLOSURE, + reason: str = "", + ) -> None: + """ + Perform the closing handshake. + + :meth:`close` waits for the other end to complete the handshake and + for the TCP connection to terminate. As a consequence, there's no need + to await :meth:`wait_closed` after :meth:`close`. + + :meth:`close` is idempotent: it doesn't do anything once the + connection is closed. + + Wrapping :func:`close` in :func:`~asyncio.create_task` is safe, given + that errors during connection termination aren't particularly useful. + + Canceling :meth:`close` is discouraged. If it takes too long, you can + set a shorter ``close_timeout``. If you don't want to wait, let the + Python process exit, then the OS will take care of closing the TCP + connection. + + Args: + code: WebSocket close code. + reason: WebSocket close reason. + + """ + try: + async with asyncio_timeout(self.close_timeout): + await self.write_close_frame(Close(code, reason)) + except asyncio.TimeoutError: + # If the close frame cannot be sent because the send buffers + # are full, the closing handshake won't complete anyway. + # Fail the connection to shut down faster. + self.fail_connection() + + # If no close frame is received within the timeout, asyncio_timeout() + # cancels the data transfer task and raises TimeoutError. 
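+ # That TimeoutError is absorbed by the except clause below, so it + # doesn't propagate out of close().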
+ + # If close() is called multiple times concurrently and one of these + # calls hits the timeout, the data transfer task will be canceled. + # Other calls will receive a CancelledError here. + + try: + # If close() is canceled during the wait, self.transfer_data_task + # is canceled before the timeout elapses. + async with asyncio_timeout(self.close_timeout): + await self.transfer_data_task + except (asyncio.TimeoutError, asyncio.CancelledError): + pass + + # Wait for the close connection task to close the TCP connection. + await asyncio.shield(self.close_connection_task) + + async def wait_closed(self) -> None: + """ + Wait until the connection is closed. + + This coroutine is identical to the :attr:`closed` attribute, except it + can be awaited. + + This can make it easier to detect connection termination, regardless + of its cause, in tasks that interact with the WebSocket connection. + + """ + await asyncio.shield(self.connection_lost_waiter) + + async def ping(self, data: Data | None = None) -> Awaitable[float]: + """ + Send a Ping_. + + .. _Ping: https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.2 + + A ping may serve as a keepalive, as a check that the remote endpoint + received all messages up to this point, or to measure :attr:`latency`. + + Canceling :meth:`ping` is discouraged. If :meth:`ping` doesn't return + immediately, it means the write buffer is full. If you don't want to + wait, you should close the connection. + + Canceling the :class:`~asyncio.Future` returned by :meth:`ping` has no + effect. + + Args: + data: Payload of the ping. A string will be encoded to UTF-8. + If ``data`` is :obj:`None`, the payload is four random bytes. + + Returns: + A future that will be completed when the corresponding pong is + received. You can ignore it if you don't intend to wait. The result + of the future is the latency of the connection in seconds. + + :: + + pong_waiter = await ws.ping() + # only if you want to wait for the corresponding pong + latency = await pong_waiter + + Raises: + ConnectionClosed: When the connection is closed. + RuntimeError: If another ping was sent with the same data and + the corresponding pong wasn't received yet. + + """ + await self.ensure_open() + + if data is not None: + data = prepare_ctrl(data) + + # Protect against duplicates if a payload is explicitly set. + if data in self.pings: + raise RuntimeError("already waiting for a pong with the same data") + + # Generate a unique random payload otherwise. + while data is None or data in self.pings: + data = struct.pack("!I", random.getrandbits(32)) + + pong_waiter = self.loop.create_future() + # Resolution of time.monotonic() may be too low on Windows. + ping_timestamp = time.perf_counter() + self.pings[data] = (pong_waiter, ping_timestamp) + + await self.write_frame(True, OP_PING, data) + + return asyncio.shield(pong_waiter) + + async def pong(self, data: Data = b"") -> None: + """ + Send a Pong_. + + .. _Pong: https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.3 + + An unsolicited pong may serve as a unidirectional heartbeat. + + Canceling :meth:`pong` is discouraged. If :meth:`pong` doesn't return + immediately, it means the write buffer is full. If you don't want to + wait, you should close the connection. + + Args: + data: Payload of the pong. A string will be encoded to UTF-8. + + Raises: + ConnectionClosed: When the connection is closed. 
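+ + For example, an unsolicited heartbeat:: + + await ws.pong(b"keepalive")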
+ + """ + await self.ensure_open() + + data = prepare_ctrl(data) + + await self.write_frame(True, OP_PONG, data) + + # Private methods - no guarantees. + + def connection_closed_exc(self) -> ConnectionClosed: + exc: ConnectionClosed + if ( + self.close_rcvd is not None + and self.close_rcvd.code in OK_CLOSE_CODES + and self.close_sent is not None + and self.close_sent.code in OK_CLOSE_CODES + ): + exc = ConnectionClosedOK( + self.close_rcvd, + self.close_sent, + self.close_rcvd_then_sent, + ) + else: + exc = ConnectionClosedError( + self.close_rcvd, + self.close_sent, + self.close_rcvd_then_sent, + ) + # Chain to the exception that terminated data transfer, if any. + exc.__cause__ = self.transfer_data_exc + return exc + + async def ensure_open(self) -> None: + """ + Check that the WebSocket connection is open. + + Raise :exc:`~websockets.exceptions.ConnectionClosed` if it isn't. + + """ + # Handle cases from most common to least common for performance. + if self.state is State.OPEN: + # If self.transfer_data_task exited without a closing handshake, + # self.close_connection_task may be closing the connection, going + # straight from OPEN to CLOSED. + if self.transfer_data_task.done(): + await asyncio.shield(self.close_connection_task) + raise self.connection_closed_exc() + else: + return + + if self.state is State.CLOSED: + raise self.connection_closed_exc() + + if self.state is State.CLOSING: + # If we started the closing handshake, wait for its completion to + # get the proper close code and reason. self.close_connection_task + # will complete within 4 or 5 * close_timeout after close(). The + # CLOSING state also occurs when failing the connection. In that + # case self.close_connection_task will complete even faster. + await asyncio.shield(self.close_connection_task) + raise self.connection_closed_exc() + + # Control may only reach this point in buggy third-party subclasses. + assert self.state is State.CONNECTING + raise InvalidState("WebSocket connection isn't established yet") + + async def transfer_data(self) -> None: + """ + Read incoming messages and put them in a queue. + + This coroutine runs in a task until the closing handshake is started. + + """ + try: + while True: + message = await self.read_message() + + # Exit the loop when receiving a close frame. + if message is None: + break + + # Wait until there's room in the queue (if necessary). + if self.max_queue is not None: + while len(self.messages) >= self.max_queue: + self._put_message_waiter = self.loop.create_future() + try: + await asyncio.shield(self._put_message_waiter) + finally: + self._put_message_waiter = None + + # Put the message in the queue. + self.messages.append(message) + + # Notify recv(). + if self._pop_message_waiter is not None: + self._pop_message_waiter.set_result(None) + self._pop_message_waiter = None + + except asyncio.CancelledError as exc: + self.transfer_data_exc = exc + # If fail_connection() cancels this task, avoid logging the error + # twice and failing the connection again. 
+ raise + + except ProtocolError as exc: + self.transfer_data_exc = exc + self.fail_connection(CloseCode.PROTOCOL_ERROR) + + except (ConnectionError, TimeoutError, EOFError, ssl.SSLError) as exc: + # Reading data with self.reader.readexactly may raise: + # - most subclasses of ConnectionError if the TCP connection + # breaks, is reset, or is aborted; + # - TimeoutError if the TCP connection times out; + # - IncompleteReadError, a subclass of EOFError, if fewer + # bytes are available than requested; + # - ssl.SSLError if the other side infringes the TLS protocol. + self.transfer_data_exc = exc + self.fail_connection(CloseCode.ABNORMAL_CLOSURE) + + except UnicodeDecodeError as exc: + self.transfer_data_exc = exc + self.fail_connection(CloseCode.INVALID_DATA) + + except PayloadTooBig as exc: + self.transfer_data_exc = exc + self.fail_connection(CloseCode.MESSAGE_TOO_BIG) + + except Exception as exc: + # This shouldn't happen often because exceptions expected under + # regular circumstances are handled above. If it does, consider + # catching and handling more exceptions. + self.logger.error("data transfer failed", exc_info=True) + + self.transfer_data_exc = exc + self.fail_connection(CloseCode.INTERNAL_ERROR) + + async def read_message(self) -> Data | None: + """ + Read a single message from the connection. + + Re-assemble data frames if the message is fragmented. + + Return :obj:`None` when the closing handshake is started. + + """ + frame = await self.read_data_frame(max_size=self.max_size) + + # A close frame was received. + if frame is None: + return None + + if frame.opcode == OP_TEXT: + text = True + elif frame.opcode == OP_BINARY: + text = False + else: # frame.opcode == OP_CONT + raise ProtocolError("unexpected opcode") + + # Shortcut for the common case - no fragmentation + if frame.fin: + return frame.data.decode() if text else frame.data + + # 5.4. Fragmentation + fragments: list[Data] = [] + max_size = self.max_size + if text: + decoder_factory = codecs.getincrementaldecoder("utf-8") + decoder = decoder_factory(errors="strict") + if max_size is None: + + def append(frame: Frame) -> None: + nonlocal fragments + fragments.append(decoder.decode(frame.data, frame.fin)) + + else: + + def append(frame: Frame) -> None: + nonlocal fragments, max_size + fragments.append(decoder.decode(frame.data, frame.fin)) + assert isinstance(max_size, int) + max_size -= len(frame.data) + + else: + if max_size is None: + + def append(frame: Frame) -> None: + nonlocal fragments + fragments.append(frame.data) + + else: + + def append(frame: Frame) -> None: + nonlocal fragments, max_size + fragments.append(frame.data) + assert isinstance(max_size, int) + max_size -= len(frame.data) + + append(frame) + + while not frame.fin: + frame = await self.read_data_frame(max_size=max_size) + if frame is None: + raise ProtocolError("incomplete fragmented message") + if frame.opcode != OP_CONT: + raise ProtocolError("unexpected opcode") + append(frame) + + return ("" if text else b"").join(fragments) + + async def read_data_frame(self, max_size: int | None) -> Frame | None: + """ + Read a single data frame from the connection. + + Process control frames received before the next data frame. + + Return :obj:`None` if a close frame is encountered before any data frame. + + """ + # 6.2. Receiving Data + while True: + frame = await self.read_frame(max_size) + + # 5.5. Control Frames + if frame.opcode == OP_CLOSE: + # 7.1.5. The WebSocket Connection Close Code + # 7.1.6. 
The WebSocket Connection Close Reason + self.close_rcvd = Close.parse(frame.data) + if self.close_sent is not None: + self.close_rcvd_then_sent = False + try: + # Echo the original data instead of re-serializing it with + # Close.serialize() because that fails when the close frame + # is empty and Close.parse() synthesizes a 1005 close code. + await self.write_close_frame(self.close_rcvd, frame.data) + except ConnectionClosed: + # Connection closed before we could echo the close frame. + pass + return None + + elif frame.opcode == OP_PING: + # Answer pings, unless connection is CLOSING. + if self.state is State.OPEN: + try: + await self.pong(frame.data) + except ConnectionClosed: + # Connection closed while draining write buffer. + pass + + elif frame.opcode == OP_PONG: + if frame.data in self.pings: + pong_timestamp = time.perf_counter() + # Sending a pong for only the most recent ping is legal. + # Acknowledge all previous pings too in that case. + ping_id = None + ping_ids = [] + for ping_id, (pong_waiter, ping_timestamp) in self.pings.items(): + ping_ids.append(ping_id) + if not pong_waiter.done(): + pong_waiter.set_result(pong_timestamp - ping_timestamp) + if ping_id == frame.data: + self.latency = pong_timestamp - ping_timestamp + break + else: + raise AssertionError("solicited pong not found in pings") + # Remove acknowledged pings from self.pings. + for ping_id in ping_ids: + del self.pings[ping_id] + + # 5.6. Data Frames + else: + return frame + + async def read_frame(self, max_size: int | None) -> Frame: + """ + Read a single frame from the connection. + + """ + frame = await Frame.read( + self.reader.readexactly, + mask=not self.is_client, + max_size=max_size, + extensions=self.extensions, + ) + if self.debug: + self.logger.debug("< %s", frame) + return frame + + def write_frame_sync(self, fin: bool, opcode: int, data: bytes) -> None: + frame = Frame(fin, Opcode(opcode), data) + if self.debug: + self.logger.debug("> %s", frame) + frame.write( + self.transport.write, + mask=self.is_client, + extensions=self.extensions, + ) + + async def drain(self) -> None: + try: + # drain() cannot be called concurrently by multiple coroutines. + # See https://github.com/python/cpython/issues/74116 for details. + # This workaround can be removed when dropping Python < 3.10. + async with self._drain_lock: + # Handle flow control automatically. + await self._drain() + except ConnectionError: + # Terminate the connection if the socket died. + self.fail_connection() + # Wait until the connection is closed to raise ConnectionClosed + # with the correct code and reason. + await self.ensure_open() + + async def write_frame( + self, fin: bool, opcode: int, data: bytes, *, _state: int = State.OPEN + ) -> None: + # Defensive assertion for protocol compliance. + if self.state is not _state: # pragma: no cover + raise InvalidState( + f"Cannot write to a WebSocket in the {self.state.name} state" + ) + self.write_frame_sync(fin, opcode, data) + await self.drain() + + async def write_close_frame(self, close: Close, data: bytes | None = None) -> None: + """ + Write a close frame if and only if the connection state is OPEN. + + This dedicated coroutine must be used for writing close frames to + ensure that at most one close frame is sent on a given connection. + + """ + # Test and set the connection state before sending the close frame to + # avoid sending two frames in case of concurrent calls. + if self.state is State.OPEN: + # 7.1.3. 
The WebSocket Closing Handshake is Started + self.state = State.CLOSING + if self.debug: + self.logger.debug("= connection is CLOSING") + + self.close_sent = close + if self.close_rcvd is not None: + self.close_rcvd_then_sent = True + if data is None: + data = close.serialize() + + # 7.1.2. Start the WebSocket Closing Handshake + await self.write_frame(True, OP_CLOSE, data, _state=State.CLOSING) + + async def keepalive_ping(self) -> None: + """ + Send a Ping frame and wait for a Pong frame at regular intervals. + + This coroutine exits when the connection terminates and one of the + following happens: + + - :meth:`ping` raises :exc:`ConnectionClosed`, or + - :meth:`close_connection` cancels :attr:`keepalive_ping_task`. + + """ + if self.ping_interval is None: + return + + try: + while True: + await asyncio.sleep(self.ping_interval) + + self.logger.debug("% sending keepalive ping") + pong_waiter = await self.ping() + + if self.ping_timeout is not None: + try: + async with asyncio_timeout(self.ping_timeout): + # Raises CancelledError if the connection is closed, + # when close_connection() cancels keepalive_ping(). + # Raises ConnectionClosed if the connection is lost, + # when connection_lost() calls abort_pings(). + await pong_waiter + self.logger.debug("% received keepalive pong") + except asyncio.TimeoutError: + if self.debug: + self.logger.debug("! timed out waiting for keepalive pong") + self.fail_connection( + CloseCode.INTERNAL_ERROR, + "keepalive ping timeout", + ) + break + + except ConnectionClosed: + pass + + except Exception: + self.logger.error("keepalive ping failed", exc_info=True) + + async def close_connection(self) -> None: + """ + 7.1.1. Close the WebSocket Connection + + When the opening handshake succeeds, :meth:`connection_open` starts + this coroutine in a task. It waits for the data transfer phase to + complete then it closes the TCP connection cleanly. + + When the opening handshake fails, :meth:`fail_connection` does the + same. There's no data transfer phase in that case. + + """ + try: + # Wait for the data transfer phase to complete. + if hasattr(self, "transfer_data_task"): + try: + await self.transfer_data_task + except asyncio.CancelledError: + pass + + # Cancel the keepalive ping task. + if hasattr(self, "keepalive_ping_task"): + self.keepalive_ping_task.cancel() + + # A client should wait for a TCP close from the server. + if self.is_client and hasattr(self, "transfer_data_task"): + if await self.wait_for_connection_lost(): + return + if self.debug: + self.logger.debug("! timed out waiting for TCP close") + + # Half-close the TCP connection if possible (when there's no TLS). + if self.transport.can_write_eof(): + if self.debug: + self.logger.debug("x half-closing TCP connection") + # write_eof() doesn't document which exceptions it raises. + # "[Errno 107] Transport endpoint is not connected" happens + # but it isn't completely clear under which circumstances. + # uvloop can raise RuntimeError here. + try: + self.transport.write_eof() + except (OSError, RuntimeError): # pragma: no cover + pass + + if await self.wait_for_connection_lost(): + return + if self.debug: + self.logger.debug("! timed out waiting for TCP close") + + finally: + # The try/finally ensures that the transport never remains open, + # even if this coroutine is canceled (for example). + await self.close_transport() + + async def close_transport(self) -> None: + """ + Close the TCP connection. + + """ + # If connection_lost() was called, the TCP connection is closed. 
+ + # However, if TLS is enabled, the transport still needs closing. + # Else asyncio complains: ResourceWarning: unclosed transport. + if self.connection_lost_waiter.done() and self.transport.is_closing(): + return + + # Close the TCP connection. Buffers are flushed asynchronously. + if self.debug: + self.logger.debug("x closing TCP connection") + self.transport.close() + + if await self.wait_for_connection_lost(): + return + if self.debug: + self.logger.debug("! timed out waiting for TCP close") + + # Abort the TCP connection. Buffers are discarded. + if self.debug: + self.logger.debug("x aborting TCP connection") + self.transport.abort() + + # connection_lost() is called quickly after aborting. + await self.wait_for_connection_lost() + + async def wait_for_connection_lost(self) -> bool: + """ + Wait until the TCP connection is closed or ``self.close_timeout`` elapses. + + Return :obj:`True` if the connection is closed and :obj:`False` + otherwise. + + """ + if not self.connection_lost_waiter.done(): + try: + async with asyncio_timeout(self.close_timeout): + await asyncio.shield(self.connection_lost_waiter) + except asyncio.TimeoutError: + pass + # Re-check self.connection_lost_waiter.done() synchronously because + # connection_lost() could run between the moment the timeout occurs + # and the moment this coroutine resumes running. + return self.connection_lost_waiter.done() + + def fail_connection( + self, + code: int = CloseCode.ABNORMAL_CLOSURE, + reason: str = "", + ) -> None: + """ + 7.1.7. Fail the WebSocket Connection + + This requires: + + 1. Stopping all processing of incoming data, which means cancelling + :attr:`transfer_data_task`. The close code will be 1006 unless a + close frame was received earlier. + + 2. Sending a close frame with an appropriate code if the opening + handshake succeeded and the other side is likely to process it. + + 3. Closing the connection. :meth:`close_connection` takes care of + this once :attr:`transfer_data_task` exits after being canceled. + + (The specification describes these steps in the opposite order.) + + """ + if self.debug: + self.logger.debug("! failing connection with code %d", code) + + # Cancel transfer_data_task if the opening handshake succeeded. + # cancel() is idempotent and ignored if the task is done already. + if hasattr(self, "transfer_data_task"): + self.transfer_data_task.cancel() + + # Send a close frame when the state is OPEN (a close frame was already + # sent if it's CLOSING), except when failing the connection because of + # an error reading from or writing to the network. + # Don't send a close frame if the connection is broken. + if code != CloseCode.ABNORMAL_CLOSURE and self.state is State.OPEN: + close = Close(code, reason) + + # Write the close frame without draining the write buffer. + + # Keeping fail_connection() synchronous guarantees it can't + # get stuck and simplifies the implementation of the callers. + # Not draining the write buffer is acceptable in this context. + + # This duplicates a few lines of code from write_close_frame(). + + self.state = State.CLOSING + if self.debug: + self.logger.debug("= connection is CLOSING") + + # If self.close_rcvd was set, the connection state would be + # CLOSING. Therefore self.close_rcvd isn't set and we don't + # have to set self.close_rcvd_then_sent. + assert self.close_rcvd is None + self.close_sent = close + + self.write_frame_sync(True, OP_CLOSE, close.serialize()) + + # Start close_connection_task if the opening handshake didn't succeed.
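+ # (When the handshake succeeds, connection_open() creates this task.)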
+ if not hasattr(self, "close_connection_task"): + self.close_connection_task = self.loop.create_task(self.close_connection()) + + def abort_pings(self) -> None: + """ + Raise ConnectionClosed in pending keepalive pings. + + They'll never receive a pong once the connection is closed. + + """ + assert self.state is State.CLOSED + exc = self.connection_closed_exc() + + for pong_waiter, _ping_timestamp in self.pings.values(): + pong_waiter.set_exception(exc) + # If the exception is never retrieved, it will be logged when ping + # is garbage-collected. This is confusing for users. + # Given that ping is done (with an exception), canceling it does + # nothing, but it prevents logging the exception. + pong_waiter.cancel() + + # asyncio.Protocol methods + + def connection_made(self, transport: asyncio.BaseTransport) -> None: + """ + Configure write buffer limits. + + The high-water limit is defined by ``self.write_limit``. + + The low-water limit currently defaults to ``self.write_limit // 4`` in + :meth:`~asyncio.WriteTransport.set_write_buffer_limits`, which should + be all right for reasonable use cases of this library. + + This is the earliest point where we can get hold of the transport, + which means it's the best point for configuring it. + + """ + transport = cast(asyncio.Transport, transport) + transport.set_write_buffer_limits(self.write_limit) + self.transport = transport + + # Copied from asyncio.StreamReaderProtocol + self.reader.set_transport(transport) + + def connection_lost(self, exc: Exception | None) -> None: + """ + 7.1.4. The WebSocket Connection is Closed. + + """ + self.state = State.CLOSED + self.logger.debug("= connection is CLOSED") + + self.abort_pings() + + # If self.connection_lost_waiter isn't pending, that's a bug, because: + # - it's set only here in connection_lost() which is called only once; + # - it must never be canceled. + self.connection_lost_waiter.set_result(None) + + if True: # pragma: no cover + # Copied from asyncio.StreamReaderProtocol + if self.reader is not None: + if exc is None: + self.reader.feed_eof() + else: + self.reader.set_exception(exc) + + # Copied from asyncio.FlowControlMixin + # Wake up the writer if currently paused. + if not self._paused: + return + waiter = self._drain_waiter + if waiter is None: + return + self._drain_waiter = None + if waiter.done(): + return + if exc is None: + waiter.set_result(None) + else: + waiter.set_exception(exc) + + def pause_writing(self) -> None: # pragma: no cover + assert not self._paused + self._paused = True + + def resume_writing(self) -> None: # pragma: no cover + assert self._paused + self._paused = False + + waiter = self._drain_waiter + if waiter is not None: + self._drain_waiter = None + if not waiter.done(): + waiter.set_result(None) + + def data_received(self, data: bytes) -> None: + self.reader.feed_data(data) + + def eof_received(self) -> None: + """ + Close the transport after receiving EOF. + + The WebSocket protocol has its own closing handshake: endpoints close + the TCP or TLS connection after sending and receiving a close frame. + + As a consequence, they never need to write after receiving EOF, so + there's no reason to keep the transport open by returning :obj:`True`. + + Besides, that doesn't work on TLS connections. 
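+ + Returning :obj:`None`, as this method implicitly does, lets asyncio + close the transport.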
+ + """ + self.reader.feed_eof() + + +# broadcast() is defined in the protocol module even though it's primarily +# used by servers and documented in the server module because it works with +# client connections too and because it's easier to test together with the +# WebSocketCommonProtocol class. + + +def broadcast( + websockets: Iterable[WebSocketCommonProtocol], + message: Data, + raise_exceptions: bool = False, +) -> None: + """ + Broadcast a message to several WebSocket connections. + + A string (:class:`str`) is sent as a Text_ frame. A bytestring or bytes-like + object (:class:`bytes`, :class:`bytearray`, or :class:`memoryview`) is sent + as a Binary_ frame. + + .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + :func:`broadcast` pushes the message synchronously to all connections even + if their write buffers are overflowing. There's no backpressure. + + If you broadcast messages faster than a connection can handle them, messages + will pile up in its write buffer until the connection times out. Keep + ``ping_interval`` and ``ping_timeout`` low to prevent excessive memory usage + from slow connections. + + Unlike :meth:`~websockets.legacy.protocol.WebSocketCommonProtocol.send`, + :func:`broadcast` doesn't support sending fragmented messages. Indeed, + fragmentation is useful for sending large messages without buffering them in + memory, while :func:`broadcast` buffers one copy per connection as fast as + possible. + + :func:`broadcast` skips connections that aren't open in order to avoid + errors on connections where the closing handshake is in progress. + + :func:`broadcast` ignores failures to write the message on some connections. + It continues writing to other connections. On Python 3.11 and above, you may + set ``raise_exceptions`` to :obj:`True` to record failures and raise all + exceptions in a :pep:`654` :exc:`ExceptionGroup`. + + While :func:`broadcast` makes more sense for servers, it works identically + with clients, if you have a use case for opening connections to many servers + and broadcasting a message to them. + + Args: + websockets: WebSocket connections to which the message will be sent. + message: Message to send. + raise_exceptions: Whether to raise an exception in case of failures. + + Raises: + TypeError: If ``message`` doesn't have a supported type. 
+ + """ + if not isinstance(message, (str, bytes, bytearray, memoryview)): + raise TypeError("data must be str or bytes-like") + + if raise_exceptions: + if sys.version_info[:2] < (3, 11): # pragma: no cover + raise ValueError("raise_exceptions requires at least Python 3.11") + exceptions = [] + + opcode, data = prepare_data(message) + + for websocket in websockets: + if websocket.state is not State.OPEN: + continue + + if websocket._fragmented_message_waiter is not None: + if raise_exceptions: + exception = RuntimeError("sending a fragmented message") + exceptions.append(exception) + else: + websocket.logger.warning( + "skipped broadcast: sending a fragmented message", + ) + continue + + try: + websocket.write_frame_sync(True, opcode, data) + except Exception as write_exception: + if raise_exceptions: + exception = RuntimeError("failed to write message") + exception.__cause__ = write_exception + exceptions.append(exception) + else: + websocket.logger.warning( + "skipped broadcast: failed to write message", + exc_info=True, + ) + + if raise_exceptions and exceptions: + raise ExceptionGroup("skipped broadcast", exceptions) + + +# Pretend that broadcast is actually defined in the server module. +broadcast.__module__ = "websockets.legacy.server" diff --git a/hackaton/lib/python3.12/site-packages/websockets/legacy/server.py b/hackaton/lib/python3.12/site-packages/websockets/legacy/server.py new file mode 100644 index 0000000..2cb9b1a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/legacy/server.py @@ -0,0 +1,1200 @@ +from __future__ import annotations + +import asyncio +import email.utils +import functools +import http +import inspect +import logging +import socket +import warnings +from types import TracebackType +from typing import ( + Any, + Awaitable, + Callable, + Generator, + Iterable, + Sequence, + Tuple, + Union, + cast, +) + +from ..asyncio.compatibility import asyncio_timeout +from ..datastructures import Headers, HeadersLike, MultipleValuesError +from ..exceptions import ( + InvalidHandshake, + InvalidHeader, + InvalidOrigin, + InvalidUpgrade, + NegotiationError, +) +from ..extensions import Extension, ServerExtensionFactory +from ..extensions.permessage_deflate import enable_server_permessage_deflate +from ..headers import ( + build_extension, + parse_extension, + parse_subprotocol, + validate_subprotocols, +) +from ..http11 import SERVER +from ..protocol import State +from ..typing import ExtensionHeader, LoggerLike, Origin, StatusLike, Subprotocol +from .exceptions import AbortHandshake, InvalidMessage +from .handshake import build_response, check_request +from .http import read_request +from .protocol import WebSocketCommonProtocol, broadcast + + +__all__ = [ + "broadcast", + "serve", + "unix_serve", + "WebSocketServerProtocol", + "WebSocketServer", +] + + +# Change to HeadersLike | ... when dropping Python < 3.10. +HeadersLikeOrCallable = Union[HeadersLike, Callable[[str, Headers], HeadersLike]] + +# Change to tuple[...] when dropping Python < 3.9. +HTTPResponse = Tuple[StatusLike, HeadersLike, bytes] + + +class WebSocketServerProtocol(WebSocketCommonProtocol): + """ + WebSocket server connection. + + :class:`WebSocketServerProtocol` provides :meth:`recv` and :meth:`send` + coroutines for receiving and sending messages. 
+ + It supports asynchronous iteration to receive messages:: + + async for message in websocket: + await process(message) + + The iterator exits normally when the connection is closed with close code + 1000 (OK) or 1001 (going away) or without a close code. It raises + a :exc:`~websockets.exceptions.ConnectionClosedError` when the connection + is closed with any other code. + + You may customize the opening handshake in a subclass by + overriding :meth:`process_request` or :meth:`select_subprotocol`. + + Args: + ws_server: WebSocket server that created this connection. + + See :func:`serve` for the documentation of ``ws_handler``, ``logger``, ``origins``, + ``extensions``, ``subprotocols``, ``extra_headers``, and ``server_header``. + + See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the + documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``, + ``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``. + + """ + + is_client = False + side = "server" + + def __init__( + self, + # The version that accepts the path in the second argument is deprecated. + ws_handler: ( + Callable[[WebSocketServerProtocol], Awaitable[Any]] + | Callable[[WebSocketServerProtocol, str], Awaitable[Any]] + ), + ws_server: WebSocketServer, + *, + logger: LoggerLike | None = None, + origins: Sequence[Origin | None] | None = None, + extensions: Sequence[ServerExtensionFactory] | None = None, + subprotocols: Sequence[Subprotocol] | None = None, + extra_headers: HeadersLikeOrCallable | None = None, + server_header: str | None = SERVER, + process_request: ( + Callable[[str, Headers], Awaitable[HTTPResponse | None]] | None + ) = None, + select_subprotocol: ( + Callable[[Sequence[Subprotocol], Sequence[Subprotocol]], Subprotocol] | None + ) = None, + open_timeout: float | None = 10, + **kwargs: Any, + ) -> None: + if logger is None: + logger = logging.getLogger("websockets.server") + super().__init__(logger=logger, **kwargs) + # For backwards compatibility with 6.0 or earlier. + if origins is not None and "" in origins: + warnings.warn("use None instead of '' in origins", DeprecationWarning) + origins = [None if origin == "" else origin for origin in origins] + # For backwards compatibility with 10.0 or earlier. Done here in + # addition to serve to trigger the deprecation warning on direct + # use of WebSocketServerProtocol. + self.ws_handler = remove_path_argument(ws_handler) + self.ws_server = ws_server + self.origins = origins + self.available_extensions = extensions + self.available_subprotocols = subprotocols + self.extra_headers = extra_headers + self.server_header = server_header + self._process_request = process_request + self._select_subprotocol = select_subprotocol + self.open_timeout = open_timeout + + def connection_made(self, transport: asyncio.BaseTransport) -> None: + """ + Register connection and initialize a task to handle it. + + """ + super().connection_made(transport) + # Register the connection with the server before creating the handler + # task. Registering at the beginning of the handler coroutine would + # create a race condition between the creation of the task, which + # schedules its execution, and the moment the handler starts running. + self.ws_server.register(self) + self.handler_task = self.loop.create_task(self.handler()) + + async def handler(self) -> None: + """ + Handle the lifecycle of a WebSocket connection. 
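+ + It performs the opening handshake, delegates to ``ws_handler``, and + then performs the closing handshake.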
+ + Since this method doesn't have a caller able to handle exceptions, it + attempts to log relevant ones and guarantees that the TCP connection is + closed before exiting. + + """ + try: + try: + async with asyncio_timeout(self.open_timeout): + await self.handshake( + origins=self.origins, + available_extensions=self.available_extensions, + available_subprotocols=self.available_subprotocols, + extra_headers=self.extra_headers, + ) + except asyncio.TimeoutError: # pragma: no cover + raise + except ConnectionError: + raise + except Exception as exc: + if isinstance(exc, AbortHandshake): + status, headers, body = exc.status, exc.headers, exc.body + elif isinstance(exc, InvalidOrigin): + if self.debug: + self.logger.debug("! invalid origin", exc_info=True) + status, headers, body = ( + http.HTTPStatus.FORBIDDEN, + Headers(), + f"Failed to open a WebSocket connection: {exc}.\n".encode(), + ) + elif isinstance(exc, InvalidUpgrade): + if self.debug: + self.logger.debug("! invalid upgrade", exc_info=True) + status, headers, body = ( + http.HTTPStatus.UPGRADE_REQUIRED, + Headers([("Upgrade", "websocket")]), + ( + f"Failed to open a WebSocket connection: {exc}.\n" + f"\n" + f"You cannot access a WebSocket server directly " + f"with a browser. You need a WebSocket client.\n" + ).encode(), + ) + elif isinstance(exc, InvalidHandshake): + if self.debug: + self.logger.debug("! invalid handshake", exc_info=True) + exc_chain = cast(BaseException, exc) + exc_str = f"{exc_chain}" + while exc_chain.__cause__ is not None: + exc_chain = exc_chain.__cause__ + exc_str += f"; {exc_chain}" + status, headers, body = ( + http.HTTPStatus.BAD_REQUEST, + Headers(), + f"Failed to open a WebSocket connection: {exc_str}.\n".encode(), + ) + else: + self.logger.error("opening handshake failed", exc_info=True) + status, headers, body = ( + http.HTTPStatus.INTERNAL_SERVER_ERROR, + Headers(), + ( + b"Failed to open a WebSocket connection.\n" + b"See server log for more information.\n" + ), + ) + + headers.setdefault("Date", email.utils.formatdate(usegmt=True)) + if self.server_header: + headers.setdefault("Server", self.server_header) + + headers.setdefault("Content-Length", str(len(body))) + headers.setdefault("Content-Type", "text/plain") + headers.setdefault("Connection", "close") + + self.write_http_response(status, headers, body) + self.logger.info( + "connection rejected (%d %s)", status.value, status.phrase + ) + await self.close_transport() + return + + try: + await self.ws_handler(self) + except Exception: + self.logger.error("connection handler failed", exc_info=True) + if not self.closed: + self.fail_connection(1011) + raise + + try: + await self.close() + except ConnectionError: + raise + except Exception: + self.logger.error("closing handshake failed", exc_info=True) + raise + + except Exception: + # Last-ditch attempt to avoid leaking connections on errors. + try: + self.transport.close() + except Exception: # pragma: no cover + pass + + finally: + # Unregister the connection with the server when the handler task + # terminates. Registration is tied to the lifecycle of the handler + # task because the server waits for tasks attached to registered + # connections before terminating. + self.ws_server.unregister(self) + self.logger.info("connection closed") + + async def read_http_request(self) -> tuple[str, Headers]: + """ + Read request line and headers from the HTTP request. + + If the request contains a body, it may be read from ``self.reader`` + after this coroutine returns. 
+ + Raises: + InvalidMessage: If the HTTP message is malformed or isn't an + HTTP/1.1 GET request. + + """ + try: + path, headers = await read_request(self.reader) + except asyncio.CancelledError: # pragma: no cover + raise + except Exception as exc: + raise InvalidMessage("did not receive a valid HTTP request") from exc + + if self.debug: + self.logger.debug("< GET %s HTTP/1.1", path) + for key, value in headers.raw_items(): + self.logger.debug("< %s: %s", key, value) + + self.path = path + self.request_headers = headers + + return path, headers + + def write_http_response( + self, status: http.HTTPStatus, headers: Headers, body: bytes | None = None + ) -> None: + """ + Write status line and headers to the HTTP response. + + This method is also able to write a response body. + + """ + self.response_headers = headers + + if self.debug: + self.logger.debug("> HTTP/1.1 %d %s", status.value, status.phrase) + for key, value in headers.raw_items(): + self.logger.debug("> %s: %s", key, value) + if body is not None: + self.logger.debug("> [body] (%d bytes)", len(body)) + + # Since the status line and headers only contain ASCII characters, + # we can keep this simple. + response = f"HTTP/1.1 {status.value} {status.phrase}\r\n" + response += str(headers) + + self.transport.write(response.encode()) + + if body is not None: + self.transport.write(body) + + async def process_request( + self, path: str, request_headers: Headers + ) -> HTTPResponse | None: + """ + Intercept the HTTP request and return an HTTP response if appropriate. + + You may override this method in a :class:`WebSocketServerProtocol` + subclass, for example: + + * to return an HTTP 200 OK response on a given path; then a load + balancer can use this path for a health check; + * to authenticate the request and return an HTTP 401 Unauthorized or an + HTTP 403 Forbidden when authentication fails. + + You may also override this method with the ``process_request`` + argument of :func:`serve` and :class:`WebSocketServerProtocol`. This + is equivalent, except ``process_request`` won't have access to the + protocol instance, so it can't store information for later use. + + :meth:`process_request` is expected to complete quickly. If it may run + for a long time, then it should await :meth:`wait_closed` and exit if + :meth:`wait_closed` completes, or else it could prevent the server + from shutting down. + + Args: + path: Request path, including optional query string. + request_headers: Request headers. + + Returns: + tuple[StatusLike, HeadersLike, bytes] | None: :obj:`None` to + continue the WebSocket handshake normally. + + An HTTP response, represented by a 3-tuple of the response status, + headers, and body, to abort the WebSocket handshake and return + that HTTP response instead. + + """ + if self._process_request is not None: + response = self._process_request(path, request_headers) + if isinstance(response, Awaitable): + return await response + else: + # For backwards compatibility with 7.0. + warnings.warn( + "declare process_request as a coroutine", DeprecationWarning + ) + return response + return None + + @staticmethod + def process_origin( + headers: Headers, origins: Sequence[Origin | None] | None = None + ) -> Origin | None: + """ + Handle the Origin HTTP request header. + + Args: + headers: Request headers. + origins: Optional list of acceptable origins. + + Raises: + InvalidOrigin: If the origin isn't acceptable.
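+ + For example, ``origins=["https://example.com", None]`` accepts requests + from that origin as well as requests without an Origin header.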
+ + """ + # "The user agent MUST NOT include more than one Origin header field" + # per https://datatracker.ietf.org/doc/html/rfc6454#section-7.3. + try: + origin = headers.get("Origin") + except MultipleValuesError as exc: + raise InvalidHeader("Origin", "multiple values") from exc + if origin is not None: + origin = cast(Origin, origin) + if origins is not None: + if origin not in origins: + raise InvalidOrigin(origin) + return origin + + @staticmethod + def process_extensions( + headers: Headers, + available_extensions: Sequence[ServerExtensionFactory] | None, + ) -> tuple[str | None, list[Extension]]: + """ + Handle the Sec-WebSocket-Extensions HTTP request header. + + Accept or reject each extension proposed in the client request. + Negotiate parameters for accepted extensions. + + Return the Sec-WebSocket-Extensions HTTP response header and the list + of accepted extensions. + + :rfc:`6455` leaves the rules up to the specification of each + :extension. + + To provide this level of flexibility, for each extension proposed by + the client, we check for a match with each extension available in the + server configuration. If no match is found, the extension is ignored. + + If several variants of the same extension are proposed by the client, + it may be accepted several times, which won't make sense in general. + Extensions must implement their own requirements. For this purpose, + the list of previously accepted extensions is provided. + + This process doesn't allow the server to reorder extensions. It can + only select a subset of the extensions proposed by the client. + + Other requirements, for example related to mandatory extensions or the + order of extensions, may be implemented by overriding this method. + + Args: + headers: Request headers. + extensions: Optional list of supported extensions. + + Raises: + InvalidHandshake: To abort the handshake with an HTTP 400 error. + + """ + response_header_value: str | None = None + + extension_headers: list[ExtensionHeader] = [] + accepted_extensions: list[Extension] = [] + + header_values = headers.get_all("Sec-WebSocket-Extensions") + + if header_values and available_extensions: + parsed_header_values: list[ExtensionHeader] = sum( + [parse_extension(header_value) for header_value in header_values], [] + ) + + for name, request_params in parsed_header_values: + for ext_factory in available_extensions: + # Skip non-matching extensions based on their name. + if ext_factory.name != name: + continue + + # Skip non-matching extensions based on their params. + try: + response_params, extension = ext_factory.process_request_params( + request_params, accepted_extensions + ) + except NegotiationError: + continue + + # Add matching extension to the final list. + extension_headers.append((name, response_params)) + accepted_extensions.append(extension) + + # Break out of the loop once we have a match. + break + + # If we didn't break from the loop, no extension in our list + # matched what the client sent. The extension is declined. + + # Serialize extension header. + if extension_headers: + response_header_value = build_extension(extension_headers) + + return response_header_value, accepted_extensions + + # Not @staticmethod because it calls self.select_subprotocol() + def process_subprotocol( + self, headers: Headers, available_subprotocols: Sequence[Subprotocol] | None + ) -> Subprotocol | None: + """ + Handle the Sec-WebSocket-Protocol HTTP request header. 
+ + Return Sec-WebSocket-Protocol HTTP response header, which is the same + as the selected subprotocol. + + Args: + headers: Request headers. + available_subprotocols: Optional list of supported subprotocols. + + Raises: + InvalidHandshake: To abort the handshake with an HTTP 400 error. + + """ + subprotocol: Subprotocol | None = None + + header_values = headers.get_all("Sec-WebSocket-Protocol") + + if header_values and available_subprotocols: + parsed_header_values: list[Subprotocol] = sum( + [parse_subprotocol(header_value) for header_value in header_values], [] + ) + + subprotocol = self.select_subprotocol( + parsed_header_values, available_subprotocols + ) + + return subprotocol + + def select_subprotocol( + self, + client_subprotocols: Sequence[Subprotocol], + server_subprotocols: Sequence[Subprotocol], + ) -> Subprotocol | None: + """ + Pick a subprotocol among those supported by the client and the server. + + If several subprotocols are available, select the preferred subprotocol + by giving equal weight to the preferences of the client and the server. + + If no subprotocol is available, proceed without a subprotocol. + + You may provide a ``select_subprotocol`` argument to :func:`serve` or + :class:`WebSocketServerProtocol` to override this logic. For example, + you could reject the handshake if the client doesn't support a + particular subprotocol, rather than accept the handshake without that + subprotocol. + + Args: + client_subprotocols: List of subprotocols offered by the client. + server_subprotocols: List of subprotocols available on the server. + + Returns: + Selected subprotocol, if a common subprotocol was found. + + :obj:`None` to continue without a subprotocol. + + """ + if self._select_subprotocol is not None: + return self._select_subprotocol(client_subprotocols, server_subprotocols) + + subprotocols = set(client_subprotocols) & set(server_subprotocols) + if not subprotocols: + return None + return sorted( + subprotocols, + key=lambda p: client_subprotocols.index(p) + server_subprotocols.index(p), + )[0] + + async def handshake( + self, + origins: Sequence[Origin | None] | None = None, + available_extensions: Sequence[ServerExtensionFactory] | None = None, + available_subprotocols: Sequence[Subprotocol] | None = None, + extra_headers: HeadersLikeOrCallable | None = None, + ) -> str: + """ + Perform the server side of the opening handshake. + + Args: + origins: List of acceptable values of the Origin HTTP header; + include :obj:`None` if the lack of an origin is acceptable. + available_extensions: List of supported extensions, in the order + in which they should be tried. + available_subprotocols: List of supported subprotocols, in order + of decreasing preference. + extra_headers: Arbitrary HTTP headers to add to the response when + the handshake succeeds. + + Returns: + Path of the URI of the request. + + Raises: + InvalidHandshake: If the handshake fails. + + """ + path, request_headers = await self.read_http_request() + + # Hook for customizing request handling, for example checking + # authentication or treating some paths as plain HTTP endpoints. + early_response_awaitable = self.process_request(path, request_headers) + if isinstance(early_response_awaitable, Awaitable): + early_response = await early_response_awaitable + else: + # For backwards compatibility with 7.0. + warnings.warn("declare process_request as a coroutine", DeprecationWarning) + early_response = early_response_awaitable + + # The connection may drop while process_request is running.
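+ # connection_lost() sets the state to CLOSED when that happens.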
+ if self.state is State.CLOSED: + # This subclass of ConnectionError is silently ignored in handler(). + raise BrokenPipeError("connection closed during opening handshake") + + # Change the response to a 503 error if the server is shutting down. + if not self.ws_server.is_serving(): + early_response = ( + http.HTTPStatus.SERVICE_UNAVAILABLE, + [], + b"Server is shutting down.\n", + ) + + if early_response is not None: + raise AbortHandshake(*early_response) + + key = check_request(request_headers) + + self.origin = self.process_origin(request_headers, origins) + + extensions_header, self.extensions = self.process_extensions( + request_headers, available_extensions + ) + + protocol_header = self.subprotocol = self.process_subprotocol( + request_headers, available_subprotocols + ) + + response_headers = Headers() + + build_response(response_headers, key) + + if extensions_header is not None: + response_headers["Sec-WebSocket-Extensions"] = extensions_header + + if protocol_header is not None: + response_headers["Sec-WebSocket-Protocol"] = protocol_header + + if callable(extra_headers): + extra_headers = extra_headers(path, self.request_headers) + if extra_headers is not None: + response_headers.update(extra_headers) + + response_headers.setdefault("Date", email.utils.formatdate(usegmt=True)) + if self.server_header is not None: + response_headers.setdefault("Server", self.server_header) + + self.write_http_response(http.HTTPStatus.SWITCHING_PROTOCOLS, response_headers) + + self.logger.info("connection open") + + self.connection_open() + + return path + + +class WebSocketServer: + """ + WebSocket server returned by :func:`serve`. + + This class mirrors the API of :class:`~asyncio.Server`. + + It keeps track of WebSocket connections in order to close them properly + when shutting down. + + Args: + logger: Logger for this server. + It defaults to ``logging.getLogger("websockets.server")``. + See the :doc:`logging guide <../../topics/logging>` for details. + + """ + + def __init__(self, logger: LoggerLike | None = None) -> None: + if logger is None: + logger = logging.getLogger("websockets.server") + self.logger = logger + + # Keep track of active connections. + self.websockets: set[WebSocketServerProtocol] = set() + + # Task responsible for closing the server and terminating connections. + self.close_task: asyncio.Task[None] | None = None + + # Completed when the server is closed and connections are terminated. + self.closed_waiter: asyncio.Future[None] + + def wrap(self, server: asyncio.base_events.Server) -> None: + """ + Attach to a given :class:`~asyncio.Server`. + + Since :meth:`~asyncio.loop.create_server` doesn't support injecting a + custom ``Server`` class, the easiest solution that doesn't rely on + private :mod:`asyncio` APIs is to: + + - instantiate a :class:`WebSocketServer` + - give the protocol factory a reference to that instance + - call :meth:`~asyncio.loop.create_server` with the factory + - attach the resulting :class:`~asyncio.Server` with this method + + """ + self.server = server + for sock in server.sockets: + if sock.family == socket.AF_INET: + name = "%s:%d" % sock.getsockname() + elif sock.family == socket.AF_INET6: + name = "[%s]:%d" % sock.getsockname()[:2] + elif sock.family == socket.AF_UNIX: + name = sock.getsockname() + # In the unlikely event that someone runs websockets over a + # protocol other than IP or Unix sockets, avoid crashing. 
+ else: # pragma: no cover + name = str(sock.getsockname()) + self.logger.info("server listening on %s", name) + + # Initialized here because we need a reference to the event loop. + # This should be moved back to __init__ when dropping Python < 3.10. + self.closed_waiter = server.get_loop().create_future() + + def register(self, protocol: WebSocketServerProtocol) -> None: + """ + Register a connection with this server. + + """ + self.websockets.add(protocol) + + def unregister(self, protocol: WebSocketServerProtocol) -> None: + """ + Unregister a connection with this server. + + """ + self.websockets.remove(protocol) + + def close(self, close_connections: bool = True) -> None: + """ + Close the server. + + * Close the underlying :class:`~asyncio.Server`. + * When ``close_connections`` is :obj:`True`, which is the default, + close existing connections. Specifically: + + * Reject opening WebSocket connections with an HTTP 503 (service + unavailable) error. This happens when the server accepted the TCP + connection but didn't complete the opening handshake before closing. + * Close open WebSocket connections with close code 1001 (going away). + + * Wait until all connection handlers terminate. + + :meth:`close` is idempotent. + + """ + if self.close_task is None: + self.close_task = self.get_loop().create_task( + self._close(close_connections) + ) + + async def _close(self, close_connections: bool) -> None: + """ + Implementation of :meth:`close`. + + This calls :meth:`~asyncio.Server.close` on the underlying + :class:`~asyncio.Server` object to stop accepting new connections and + then closes open connections with close code 1001. + + """ + self.logger.info("server closing") + + # Stop accepting new connections. + self.server.close() + + # Wait until all accepted connections reach connection_made() and call + # register(). See https://github.com/python/cpython/issues/79033 for + # details. This workaround can be removed when dropping Python < 3.11. + await asyncio.sleep(0) + + if close_connections: + # Close OPEN connections with close code 1001. After server.close(), + # handshake() closes OPENING connections with an HTTP 503 error. + close_tasks = [ + asyncio.create_task(websocket.close(1001)) + for websocket in self.websockets + if websocket.state is not State.CONNECTING + ] + # asyncio.wait doesn't accept an empty first argument. + if close_tasks: + await asyncio.wait(close_tasks) + + # Wait until all TCP connections are closed. + await self.server.wait_closed() + + # Wait until all connection handlers terminate. + # asyncio.wait doesn't accept an empty first argument. + if self.websockets: + await asyncio.wait( + [websocket.handler_task for websocket in self.websockets] + ) + + # Tell wait_closed() to return. + self.closed_waiter.set_result(None) + + self.logger.info("server closed") + + async def wait_closed(self) -> None: + """ + Wait until the server is closed. + + When :meth:`wait_closed` returns, all TCP connections are closed and + all connection handlers have returned. + + To ensure a fast shutdown, a connection handler should always be + awaiting at least one of: + + * :meth:`~WebSocketServerProtocol.recv`: when the connection is closed, + it raises :exc:`~websockets.exceptions.ConnectionClosedOK`; + * :meth:`~WebSocketServerProtocol.wait_closed`: when the connection is + closed, it returns. + + Then the connection handler is immediately notified of the shutdown; + it can clean up and exit. 
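+
+        For example, a connection handler built along these lines shuts down
+        promptly when the server closes (a sketch; ``process_message`` stands
+        for an application callback and isn't part of websockets)::
+
+            async def handler(websocket):
+                async for message in websocket:
+                    await process_message(message)
+                # async iteration ends cleanly when the connection closes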
+ + """ + await asyncio.shield(self.closed_waiter) + + def get_loop(self) -> asyncio.AbstractEventLoop: + """ + See :meth:`asyncio.Server.get_loop`. + + """ + return self.server.get_loop() + + def is_serving(self) -> bool: + """ + See :meth:`asyncio.Server.is_serving`. + + """ + return self.server.is_serving() + + async def start_serving(self) -> None: # pragma: no cover + """ + See :meth:`asyncio.Server.start_serving`. + + Typical use:: + + server = await serve(..., start_serving=False) + # perform additional setup here... + # ... then start the server + await server.start_serving() + + """ + await self.server.start_serving() + + async def serve_forever(self) -> None: # pragma: no cover + """ + See :meth:`asyncio.Server.serve_forever`. + + Typical use:: + + server = await serve(...) + # this coroutine doesn't return + # canceling it stops the server + await server.serve_forever() + + This is an alternative to using :func:`serve` as an asynchronous context + manager. Shutdown is triggered by canceling :meth:`serve_forever` + instead of exiting a :func:`serve` context. + + """ + await self.server.serve_forever() + + @property + def sockets(self) -> Iterable[socket.socket]: + """ + See :attr:`asyncio.Server.sockets`. + + """ + return self.server.sockets + + async def __aenter__(self) -> WebSocketServer: # pragma: no cover + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: # pragma: no cover + self.close() + await self.wait_closed() + + +class Serve: + """ + Start a WebSocket server listening on ``host`` and ``port``. + + Whenever a client connects, the server creates a + :class:`WebSocketServerProtocol`, performs the opening handshake, and + delegates to the connection handler, ``ws_handler``. + + The handler receives the :class:`WebSocketServerProtocol` and uses it to + send and receive messages. + + Once the handler completes, either normally or with an exception, the + server performs the closing handshake and closes the connection. + + Awaiting :func:`serve` yields a :class:`WebSocketServer`. This object + provides a :meth:`~WebSocketServer.close` method to shut down the server:: + + # set this future to exit the server + stop = asyncio.get_running_loop().create_future() + + server = await serve(...) + await stop + server.close() + await server.wait_closed() + + :func:`serve` can be used as an asynchronous context manager. Then, the + server is shut down automatically when exiting the context:: + + # set this future to exit the server + stop = asyncio.get_running_loop().create_future() + + async with serve(...): + await stop + + Args: + ws_handler: Connection handler. It receives the WebSocket connection, + which is a :class:`WebSocketServerProtocol`, in argument. + host: Network interfaces the server binds to. + See :meth:`~asyncio.loop.create_server` for details. + port: TCP port the server listens on. + See :meth:`~asyncio.loop.create_server` for details. + create_protocol: Factory for the :class:`asyncio.Protocol` managing + the connection. It defaults to :class:`WebSocketServerProtocol`. + Set it to a wrapper or a subclass to customize connection handling. + logger: Logger for this server. + It defaults to ``logging.getLogger("websockets.server")``. + See the :doc:`logging guide <../../topics/logging>` for details. + compression: The "permessage-deflate" extension is enabled by default. + Set ``compression`` to :obj:`None` to disable it. 
See the + :doc:`compression guide <../../topics/compression>` for details. + origins: Acceptable values of the ``Origin`` header, for defending + against Cross-Site WebSocket Hijacking attacks. Include :obj:`None` + in the list if the lack of an origin is acceptable. + extensions: List of supported extensions, in order in which they + should be negotiated and run. + subprotocols: List of supported subprotocols, in order of decreasing + preference. + extra_headers (HeadersLike | Callable[[str, Headers], HeadersLike]): + Arbitrary HTTP headers to add to the response. This can be + a :data:`~websockets.datastructures.HeadersLike` or a callable + taking the request path and headers as arguments and returning + a :data:`~websockets.datastructures.HeadersLike`. + server_header: Value of the ``Server`` response header. + It defaults to ``"Python/x.y.z websockets/X.Y"``. + Setting it to :obj:`None` removes the header. + process_request (Callable[[str, Headers], \ + Awaitable[tuple[StatusLike, HeadersLike, bytes] | None]] | None): + Intercept HTTP request before the opening handshake. + See :meth:`~WebSocketServerProtocol.process_request` for details. + select_subprotocol: Select a subprotocol supported by the client. + See :meth:`~WebSocketServerProtocol.select_subprotocol` for details. + open_timeout: Timeout for opening connections in seconds. + :obj:`None` disables the timeout. + + See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the + documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``, + ``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``. + + Any other keyword arguments are passed to the event loop's + :meth:`~asyncio.loop.create_server` method. + + For example: + + * You can set ``ssl`` to a :class:`~ssl.SSLContext` to enable TLS. + + * You can set ``sock`` to a :obj:`~socket.socket` that you created + outside of websockets. + + Returns: + WebSocket server. + + """ + + def __init__( + self, + # The version that accepts the path in the second argument is deprecated. + ws_handler: ( + Callable[[WebSocketServerProtocol], Awaitable[Any]] + | Callable[[WebSocketServerProtocol, str], Awaitable[Any]] + ), + host: str | Sequence[str] | None = None, + port: int | None = None, + *, + create_protocol: Callable[..., WebSocketServerProtocol] | None = None, + logger: LoggerLike | None = None, + compression: str | None = "deflate", + origins: Sequence[Origin | None] | None = None, + extensions: Sequence[ServerExtensionFactory] | None = None, + subprotocols: Sequence[Subprotocol] | None = None, + extra_headers: HeadersLikeOrCallable | None = None, + server_header: str | None = SERVER, + process_request: ( + Callable[[str, Headers], Awaitable[HTTPResponse | None]] | None + ) = None, + select_subprotocol: ( + Callable[[Sequence[Subprotocol], Sequence[Subprotocol]], Subprotocol] | None + ) = None, + open_timeout: float | None = 10, + ping_interval: float | None = 20, + ping_timeout: float | None = 20, + close_timeout: float | None = None, + max_size: int | None = 2**20, + max_queue: int | None = 2**5, + read_limit: int = 2**16, + write_limit: int = 2**16, + **kwargs: Any, + ) -> None: + # Backwards compatibility: close_timeout used to be called timeout. + timeout: float | None = kwargs.pop("timeout", None) + if timeout is None: + timeout = 10 + else: + warnings.warn("rename timeout to close_timeout", DeprecationWarning) + # If both are specified, timeout is ignored.
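+        # For example, Serve(handler, timeout=5, close_timeout=8) warns and
+        # keeps close_timeout=8 (hypothetical values, for illustration).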
+ if close_timeout is None: + close_timeout = timeout + + # Backwards compatibility: create_protocol used to be called klass. + klass: type[WebSocketServerProtocol] | None = kwargs.pop("klass", None) + if klass is None: + klass = WebSocketServerProtocol + else: + warnings.warn("rename klass to create_protocol", DeprecationWarning) + # If both are specified, klass is ignored. + if create_protocol is None: + create_protocol = klass + + # Backwards compatibility: recv() used to return None on closed connections + legacy_recv: bool = kwargs.pop("legacy_recv", False) + + # Backwards compatibility: the loop parameter used to be supported. + _loop: asyncio.AbstractEventLoop | None = kwargs.pop("loop", None) + if _loop is None: + loop = asyncio.get_event_loop() + else: + loop = _loop + warnings.warn("remove loop argument", DeprecationWarning) + + ws_server = WebSocketServer(logger=logger) + + secure = kwargs.get("ssl") is not None + + if compression == "deflate": + extensions = enable_server_permessage_deflate(extensions) + elif compression is not None: + raise ValueError(f"unsupported compression: {compression}") + + if subprotocols is not None: + validate_subprotocols(subprotocols) + + # Help mypy and avoid this error: "type[WebSocketServerProtocol] | + # Callable[..., WebSocketServerProtocol]" not callable [misc] + create_protocol = cast(Callable[..., WebSocketServerProtocol], create_protocol) + factory = functools.partial( + create_protocol, + # For backwards compatibility with 10.0 or earlier. Done here in + # addition to WebSocketServerProtocol to trigger the deprecation + # warning once per serve() call rather than once per connection. + remove_path_argument(ws_handler), + ws_server, + host=host, + port=port, + secure=secure, + open_timeout=open_timeout, + ping_interval=ping_interval, + ping_timeout=ping_timeout, + close_timeout=close_timeout, + max_size=max_size, + max_queue=max_queue, + read_limit=read_limit, + write_limit=write_limit, + loop=_loop, + legacy_recv=legacy_recv, + origins=origins, + extensions=extensions, + subprotocols=subprotocols, + extra_headers=extra_headers, + server_header=server_header, + process_request=process_request, + select_subprotocol=select_subprotocol, + logger=logger, + ) + + if kwargs.pop("unix", False): + path: str | None = kwargs.pop("path", None) + # unix_serve(path) must not specify host and port parameters. + assert host is None and port is None + create_server = functools.partial( + loop.create_unix_server, factory, path, **kwargs + ) + else: + create_server = functools.partial( + loop.create_server, factory, host, port, **kwargs + ) + + # This is a coroutine function. + self._create_server = create_server + self.ws_server = ws_server + + # async with serve(...) + + async def __aenter__(self) -> WebSocketServer: + return await self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.ws_server.close() + await self.ws_server.wait_closed() + + # await serve(...) + + def __await__(self) -> Generator[Any, None, WebSocketServer]: + # Create a suitable iterator by calling __await__ on a coroutine. + return self.__await_impl__().__await__() + + async def __await_impl__(self) -> WebSocketServer: + server = await self._create_server() + self.ws_server.wrap(server) + return self.ws_server + + # yield from serve(...) 
- remove when dropping Python < 3.10 + + __iter__ = __await__ + + +serve = Serve + + +def unix_serve( + # The version that accepts the path in the second argument is deprecated. + ws_handler: ( + Callable[[WebSocketServerProtocol], Awaitable[Any]] + | Callable[[WebSocketServerProtocol, str], Awaitable[Any]] + ), + path: str | None = None, + **kwargs: Any, +) -> Serve: + """ + Start a WebSocket server listening on a Unix socket. + + This function is identical to :func:`serve`, except the ``host`` and + ``port`` arguments are replaced by ``path``. It is only available on Unix. + + Unrecognized keyword arguments are passed to the event loop's + :meth:`~asyncio.loop.create_unix_server` method. + + It's useful for deploying a server behind a reverse proxy such as nginx. + + Args: + path: File system path to the Unix socket. + + """ + return serve(ws_handler, path=path, unix=True, **kwargs) + + +def remove_path_argument( + ws_handler: ( + Callable[[WebSocketServerProtocol], Awaitable[Any]] + | Callable[[WebSocketServerProtocol, str], Awaitable[Any]] + ), +) -> Callable[[WebSocketServerProtocol], Awaitable[Any]]: + try: + inspect.signature(ws_handler).bind(None) + except TypeError: + try: + inspect.signature(ws_handler).bind(None, "") + except TypeError: # pragma: no cover + # ws_handler accepts neither one nor two arguments; leave it alone. + pass + else: + # ws_handler accepts two arguments; activate backwards compatibility. + warnings.warn("remove second argument of ws_handler", DeprecationWarning) + + async def _ws_handler(websocket: WebSocketServerProtocol) -> Any: + return await cast( + Callable[[WebSocketServerProtocol, str], Awaitable[Any]], + ws_handler, + )(websocket, websocket.path) + + return _ws_handler + + return cast( + Callable[[WebSocketServerProtocol], Awaitable[Any]], + ws_handler, + ) diff --git a/hackaton/lib/python3.12/site-packages/websockets/protocol.py b/hackaton/lib/python3.12/site-packages/websockets/protocol.py new file mode 100644 index 0000000..8751ebd --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/protocol.py @@ -0,0 +1,732 @@ +from __future__ import annotations + +import enum +import logging +import uuid +from typing import Generator, Union + +from .exceptions import ( + ConnectionClosed, + ConnectionClosedError, + ConnectionClosedOK, + InvalidState, + PayloadTooBig, + ProtocolError, +) +from .extensions import Extension +from .frames import ( + OK_CLOSE_CODES, + OP_BINARY, + OP_CLOSE, + OP_CONT, + OP_PING, + OP_PONG, + OP_TEXT, + Close, + CloseCode, + Frame, +) +from .http11 import Request, Response +from .streams import StreamReader +from .typing import LoggerLike, Origin, Subprotocol + + +__all__ = [ + "Protocol", + "Side", + "State", + "SEND_EOF", +] + +# Change to Request | Response | Frame when dropping Python < 3.10. +Event = Union[Request, Response, Frame] +"""Events that :meth:`~Protocol.events_received` may return.""" + + +class Side(enum.IntEnum): + """A WebSocket connection is either a server or a client.""" + + SERVER, CLIENT = range(2) + + +SERVER = Side.SERVER +CLIENT = Side.CLIENT + + +class State(enum.IntEnum): + """A WebSocket connection is in one of these four states.""" + + CONNECTING, OPEN, CLOSING, CLOSED = range(4) + + +CONNECTING = State.CONNECTING +OPEN = State.OPEN +CLOSING = State.CLOSING +CLOSED = State.CLOSED + + +SEND_EOF = b"" +"""Sentinel signaling that the TCP connection must be half-closed.""" + + +class Protocol: + """ + Sans-I/O implementation of a WebSocket connection.
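+
+    A sketch of the I/O loop this class expects; ``sock`` is a hypothetical
+    connected socket, used only for illustration::
+
+        protocol = Protocol(side=Side.CLIENT)
+        data = sock.recv(4096)
+        if data:
+            protocol.receive_data(data)
+        else:
+            protocol.receive_eof()
+        for data in protocol.data_to_send():
+            if data:
+                sock.sendall(data)
+            else:  # SEND_EOF: half-close the TCP connection
+                sock.shutdown(socket.SHUT_WR)
+        for event in protocol.events_received():
+            ...  # hand events to the application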
+ + Args: + side: :attr:`~Side.CLIENT` or :attr:`~Side.SERVER`. + state: Initial state of the WebSocket connection. + max_size: Maximum size of incoming messages in bytes; + :obj:`None` disables the limit. + logger: Logger for this connection; depending on ``side``, + defaults to ``logging.getLogger("websockets.client")`` + or ``logging.getLogger("websockets.server")``; + see the :doc:`logging guide <../../topics/logging>` for details. + + """ + + def __init__( + self, + side: Side, + *, + state: State = OPEN, + max_size: int | None = 2**20, + logger: LoggerLike | None = None, + ) -> None: + # Unique identifier. For logs. + self.id: uuid.UUID = uuid.uuid4() + """Unique identifier of the connection. Useful in logs.""" + + # Logger or LoggerAdapter for this connection. + if logger is None: + logger = logging.getLogger(f"websockets.{side.name.lower()}") + self.logger: LoggerLike = logger + """Logger for this connection.""" + + # Track if DEBUG is enabled. Shortcut logging calls if it isn't. + self.debug = logger.isEnabledFor(logging.DEBUG) + + # Connection side. CLIENT or SERVER. + self.side = side + + # Connection state. Initially OPEN because subclasses handle CONNECTING. + self.state = state + + # Maximum size of incoming messages in bytes. + self.max_size = max_size + + # Current size of incoming message in bytes. Only set while reading a + # fragmented message, i.e. data frames with the FIN bit not set. + self.cur_size: int | None = None + + # True while sending a fragmented message, i.e. data frames with the + # FIN bit not set. + self.expect_continuation_frame = False + + # WebSocket protocol parameters. + self.origin: Origin | None = None + self.extensions: list[Extension] = [] + self.subprotocol: Subprotocol | None = None + + # Close code and reason, set when a close frame is sent or received. + self.close_rcvd: Close | None = None + self.close_sent: Close | None = None + self.close_rcvd_then_sent: bool | None = None + + # Track if an exception happened during the handshake. + self.handshake_exc: Exception | None = None + """ + Exception to raise if the opening handshake failed. + + :obj:`None` if the opening handshake succeeded. + + """ + + # Track if send_eof() was called. + self.eof_sent = False + + # Parser state. + self.reader = StreamReader() + self.events: list[Event] = [] + self.writes: list[bytes] = [] + self.parser = self.parse() + next(self.parser) # start coroutine + self.parser_exc: Exception | None = None + + @property + def state(self) -> State: + """ + State of the WebSocket connection. + + Defined in 4.1, 4.2, 7.1.3, and 7.1.4 of :rfc:`6455`. + + """ + return self._state + + @state.setter + def state(self, state: State) -> None: + if self.debug: + self.logger.debug("= connection is %s", state.name) + self._state = state + + @property + def close_code(self) -> int | None: + """ + `WebSocket close code`_. + + .. _WebSocket close code: + https://datatracker.ietf.org/doc/html/rfc6455#section-7.1.5 + + :obj:`None` if the connection isn't closed yet. + + """ + if self.state is not CLOSED: + return None + elif self.close_rcvd is None: + return CloseCode.ABNORMAL_CLOSURE + else: + return self.close_rcvd.code + + @property + def close_reason(self) -> str | None: + """ + `WebSocket close reason`_. + + .. _WebSocket close reason: + https://datatracker.ietf.org/doc/html/rfc6455#section-7.1.6 + + :obj:`None` if the connection isn't closed yet.
+ + """ + if self.state is not CLOSED: + return None + elif self.close_rcvd is None: + return "" + else: + return self.close_rcvd.reason + + @property + def close_exc(self) -> ConnectionClosed: + """ + Exception to raise when trying to interact with a closed connection. + + Don't raise this exception while the connection :attr:`state` + is :attr:`~websockets.protocol.State.CLOSING`; wait until + it's :attr:`~websockets.protocol.State.CLOSED`. + + Indeed, the exception includes the close code and reason, which are + known only once the connection is closed. + + Raises: + AssertionError: If the connection isn't closed yet. + + """ + assert self.state is CLOSED, "connection isn't closed yet" + exc_type: type[ConnectionClosed] + if ( + self.close_rcvd is not None + and self.close_sent is not None + and self.close_rcvd.code in OK_CLOSE_CODES + and self.close_sent.code in OK_CLOSE_CODES + ): + exc_type = ConnectionClosedOK + else: + exc_type = ConnectionClosedError + exc: ConnectionClosed = exc_type( + self.close_rcvd, + self.close_sent, + self.close_rcvd_then_sent, + ) + # Chain to the exception raised in the parser, if any. + exc.__cause__ = self.parser_exc + return exc + + # Public methods for receiving data. + + def receive_data(self, data: bytes) -> None: + """ + Receive data from the network. + + After calling this method: + + - You must call :meth:`data_to_send` and send this data to the network. + - You should call :meth:`events_received` and process resulting events. + + Raises: + EOFError: If :meth:`receive_eof` was called earlier. + + """ + self.reader.feed_data(data) + next(self.parser) + + def receive_eof(self) -> None: + """ + Receive the end of the data stream from the network. + + After calling this method: + + - You must call :meth:`data_to_send` and send this data to the network; + it will return ``[b""]``, signaling the end of the stream, or ``[]``. + - You aren't expected to call :meth:`events_received`; it won't return + any new events. + + :meth:`receive_eof` is idempotent. + + """ + if self.reader.eof: + return + self.reader.feed_eof() + next(self.parser) + + # Public methods for sending events. + + def send_continuation(self, data: bytes, fin: bool) -> None: + """ + Send a `Continuation frame`_. + + .. _Continuation frame: + https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + Parameters: + data: payload containing the same kind of data + as the initial frame. + fin: FIN bit; set it to :obj:`True` if this is the last frame + of a fragmented message and to :obj:`False` otherwise. + + Raises: + ProtocolError: If a fragmented message isn't in progress. + + """ + if not self.expect_continuation_frame: + raise ProtocolError("unexpected continuation frame") + if self._state is not OPEN: + raise InvalidState(f"connection is {self.state.name.lower()}") + self.expect_continuation_frame = not fin + self.send_frame(Frame(OP_CONT, data, fin)) + + def send_text(self, data: bytes, fin: bool = True) -> None: + """ + Send a `Text frame`_. + + .. _Text frame: + https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + Parameters: + data: payload containing text encoded with UTF-8. + fin: FIN bit; set it to :obj:`False` if this is the first frame of + a fragmented message. + + Raises: + ProtocolError: If a fragmented message is in progress. 
+ + """ + if self.expect_continuation_frame: + raise ProtocolError("expected a continuation frame") + if self._state is not OPEN: + raise InvalidState(f"connection is {self.state.name.lower()}") + self.expect_continuation_frame = not fin + self.send_frame(Frame(OP_TEXT, data, fin)) + + def send_binary(self, data: bytes, fin: bool = True) -> None: + """ + Send a `Binary frame`_. + + .. _Binary frame: + https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + Parameters: + data: payload containing arbitrary binary data. + fin: FIN bit; set it to :obj:`False` if this is the first frame of + a fragmented message. + + Raises: + ProtocolError: If a fragmented message is in progress. + + """ + if self.expect_continuation_frame: + raise ProtocolError("expected a continuation frame") + if self._state is not OPEN: + raise InvalidState(f"connection is {self.state.name.lower()}") + self.expect_continuation_frame = not fin + self.send_frame(Frame(OP_BINARY, data, fin)) + + def send_close(self, code: int | None = None, reason: str = "") -> None: + """ + Send a `Close frame`_. + + .. _Close frame: + https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.1 + + Parameters: + code: close code. + reason: close reason. + + Raises: + ProtocolError: If the code isn't valid or if a reason is provided + without a code. + + """ + # While RFC 6455 doesn't rule out sending more than one close Frame, + # websockets is conservative in what it sends and doesn't allow that. + if self._state is not OPEN: + raise InvalidState(f"connection is {self.state.name.lower()}") + if code is None: + if reason != "": + raise ProtocolError("cannot send a reason without a code") + close = Close(CloseCode.NO_STATUS_RCVD, "") + data = b"" + else: + close = Close(code, reason) + data = close.serialize() + # 7.1.3. The WebSocket Closing Handshake is Started + self.send_frame(Frame(OP_CLOSE, data)) + # Since the state is OPEN, no close frame was received yet. + # As a consequence, self.close_rcvd_then_sent remains None. + assert self.close_rcvd is None + self.close_sent = close + self.state = CLOSING + + def send_ping(self, data: bytes) -> None: + """ + Send a `Ping frame`_. + + .. _Ping frame: + https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.2 + + Parameters: + data: payload containing arbitrary binary data. + + """ + # RFC 6455 allows control frames after starting the closing handshake. + if self._state is not OPEN and self._state is not CLOSING: + raise InvalidState(f"connection is {self.state.name.lower()}") + self.send_frame(Frame(OP_PING, data)) + + def send_pong(self, data: bytes) -> None: + """ + Send a `Pong frame`_. + + .. _Pong frame: + https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.3 + + Parameters: + data: payload containing arbitrary binary data. + + """ + # RFC 6455 allows control frames after starting the closing handshake. + if self._state is not OPEN and self._state is not CLOSING: + raise InvalidState(f"connection is {self.state.name.lower()}") + self.send_frame(Frame(OP_PONG, data)) + + def fail(self, code: int, reason: str = "") -> None: + """ + `Fail the WebSocket connection`_. + + .. _Fail the WebSocket connection: + https://datatracker.ietf.org/doc/html/rfc6455#section-7.1.7 + + Parameters: + code: close code + reason: close reason + + Raises: + ProtocolError: If the code isn't valid. + """ + # 7.1.7. 
Fail the WebSocket Connection + + # Send a close frame when the state is OPEN (a close frame was already + # sent if it's CLOSING), except when failing the connection because + # of an error reading from or writing to the network. + if self.state is OPEN: + if code != CloseCode.ABNORMAL_CLOSURE: + close = Close(code, reason) + data = close.serialize() + self.send_frame(Frame(OP_CLOSE, data)) + self.close_sent = close + # If recv_messages() raised an exception upon receiving a close + # frame but before echoing it, then close_rcvd is not None even + # though the state is OPEN. This happens when the connection is + # closed while receiving a fragmented message. + if self.close_rcvd is not None: + self.close_rcvd_then_sent = True + self.state = CLOSING + + # When failing the connection, a server closes the TCP connection + # without waiting for the client to complete the handshake, while a + # client waits for the server to close the TCP connection, possibly + # after sending a close frame that the client will ignore. + if self.side is SERVER and not self.eof_sent: + self.send_eof() + + # 7.1.7. Fail the WebSocket Connection "An endpoint MUST NOT continue + # to attempt to process data (including a responding Close frame) from + # the remote endpoint after being instructed to _Fail the WebSocket + # Connection_." + self.parser = self.discard() + next(self.parser) # start coroutine + + # Public method for getting incoming events after receiving data. + + def events_received(self) -> list[Event]: + """ + Fetch events generated from data received from the network. + + Call this method immediately after any of the ``receive_*()`` methods. + + Process resulting events, likely by passing them to the application. + + Returns: + Events read from the connection. + """ + events, self.events = self.events, [] + return events + + # Public method for getting outgoing data after receiving data or sending events. + + def data_to_send(self) -> list[bytes]: + """ + Obtain data to send to the network. + + Call this method immediately after any of the ``receive_*()``, + ``send_*()``, or :meth:`fail` methods. + + Write resulting data to the connection. + + The empty bytestring :data:`~websockets.protocol.SEND_EOF` signals + the end of the data stream. When you receive it, half-close the TCP + connection. + + Returns: + Data to write to the connection. + + """ + writes, self.writes = self.writes, [] + return writes + + def close_expected(self) -> bool: + """ + Tell if the TCP connection is expected to close soon. + + Call this method immediately after any of the ``receive_*()``, + ``send_close()``, or :meth:`fail` methods. + + If it returns :obj:`True`, schedule closing the TCP connection after a + short timeout if the other side hasn't already closed it. + + Returns: + Whether the TCP connection is expected to close soon. + + """ + # We expect a TCP close if and only if we sent a close frame: + # * Normal closure: once we send a close frame, we expect a TCP close: + # server waits for client to complete the TCP closing handshake; + # client waits for server to initiate the TCP closing handshake. + # * Abnormal closure: we always send a close frame and the same logic + # applies, except on EOFError where we don't send a close frame + # because we already received the TCP close, so we don't expect it. + # We already got a TCP Close if and only if the state is CLOSED. + return self.state is CLOSING or self.handshake_exc is not None + + # Private methods for receiving data.
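+
+    # For orientation, the receive path in a nutshell (client side; the six
+    # bytes below encode an unmasked text frame carrying "Hello"):
+    #
+    #     client = Protocol(side=Side.CLIENT)
+    #     client.receive_data(b"\x81\x05Hello")  # steps parse() below
+    #     client.events_received()               # [Frame(OP_TEXT, b"Hello")]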
+ + def parse(self) -> Generator[None, None, None]: + """ + Parse incoming data into frames. + + :meth:`receive_data` and :meth:`receive_eof` run this generator + coroutine until it needs more data or reaches EOF. + + :meth:`parse` never raises an exception. Instead, it sets the + :attr:`parser_exc` and yields control. + + """ + try: + while True: + if (yield from self.reader.at_eof()): + if self.debug: + self.logger.debug("< EOF") + # If the WebSocket connection is closed cleanly, with a + # closing handshake, recv_frame() substitutes parse() + # with discard(). This branch is reached only when the + # connection isn't closed cleanly. + raise EOFError("unexpected end of stream") + + if self.max_size is None: + max_size = None + elif self.cur_size is None: + max_size = self.max_size + else: + max_size = self.max_size - self.cur_size + + # During a normal closure, execution ends here on the next + # iteration of the loop after receiving a close frame. At + # this point, recv_frame() replaced parse() by discard(). + frame = yield from Frame.parse( + self.reader.read_exact, + mask=self.side is SERVER, + max_size=max_size, + extensions=self.extensions, + ) + + if self.debug: + self.logger.debug("< %s", frame) + + self.recv_frame(frame) + + except ProtocolError as exc: + self.fail(CloseCode.PROTOCOL_ERROR, str(exc)) + self.parser_exc = exc + + except EOFError as exc: + self.fail(CloseCode.ABNORMAL_CLOSURE, str(exc)) + self.parser_exc = exc + + except UnicodeDecodeError as exc: + self.fail(CloseCode.INVALID_DATA, f"{exc.reason} at position {exc.start}") + self.parser_exc = exc + + except PayloadTooBig as exc: + self.fail(CloseCode.MESSAGE_TOO_BIG, str(exc)) + self.parser_exc = exc + + except Exception as exc: + self.logger.error("parser failed", exc_info=True) + # Don't include exception details, which may be security-sensitive. + self.fail(CloseCode.INTERNAL_ERROR) + self.parser_exc = exc + + # During an abnormal closure, execution ends here after catching an + # exception. At this point, fail() replaced parse() by discard(). + yield + raise AssertionError("parse() shouldn't step after error") + + def discard(self) -> Generator[None, None, None]: + """ + Discard incoming data. + + This coroutine replaces :meth:`parse`: + + - after receiving a close frame, during a normal closure (1.4); + - after sending a close frame, during an abnormal closure (7.1.7). + + """ + # After the opening handshake completes, the server closes the TCP + # connection in the same circumstances where discard() replaces parse(). + # The client closes it when it receives EOF from the server or times + # out. (The latter case cannot be handled in this Sans-I/O layer.) + assert (self.state == CONNECTING or self.side is SERVER) == (self.eof_sent) + while not (yield from self.reader.at_eof()): + self.reader.discard() + if self.debug: + self.logger.debug("< EOF") + # A server closes the TCP connection immediately, while a client + # waits for the server to close the TCP connection. + if self.state != CONNECTING and self.side is CLIENT: + self.send_eof() + self.state = CLOSED + # If discard() completes normally, execution ends here. + yield + # Once the reader reaches EOF, its feed_data/eof() methods raise an + # error, so our receive_data/eof() methods don't step the generator. + raise AssertionError("discard() shouldn't step after EOF") + + def recv_frame(self, frame: Frame) -> None: + """ + Process an incoming frame.
+ + """ + if frame.opcode is OP_TEXT or frame.opcode is OP_BINARY: + if self.cur_size is not None: + raise ProtocolError("expected a continuation frame") + if frame.fin: + self.cur_size = None + else: + self.cur_size = len(frame.data) + + elif frame.opcode is OP_CONT: + if self.cur_size is None: + raise ProtocolError("unexpected continuation frame") + if frame.fin: + self.cur_size = None + else: + self.cur_size += len(frame.data) + + elif frame.opcode is OP_PING: + # 5.5.2. Ping: "Upon receipt of a Ping frame, an endpoint MUST + # send a Pong frame in response" + pong_frame = Frame(OP_PONG, frame.data) + self.send_frame(pong_frame) + + elif frame.opcode is OP_PONG: + # 5.5.3 Pong: "A response to an unsolicited Pong frame is not + # expected." + pass + + elif frame.opcode is OP_CLOSE: + # 7.1.5. The WebSocket Connection Close Code + # 7.1.6. The WebSocket Connection Close Reason + self.close_rcvd = Close.parse(frame.data) + if self.state is CLOSING: + assert self.close_sent is not None + self.close_rcvd_then_sent = False + + if self.cur_size is not None: + raise ProtocolError("incomplete fragmented message") + + # 5.5.1 Close: "If an endpoint receives a Close frame and did + # not previously send a Close frame, the endpoint MUST send a + # Close frame in response. (When sending a Close frame in + # response, the endpoint typically echos the status code it + # received.)" + + if self.state is OPEN: + # Echo the original data instead of re-serializing it with + # Close.serialize() because that fails when the close frame + # is empty and Close.parse() synthesizes a 1005 close code. + # The rest is identical to send_close(). + self.send_frame(Frame(OP_CLOSE, frame.data)) + self.close_sent = self.close_rcvd + self.close_rcvd_then_sent = True + self.state = CLOSING + + # 7.1.2. Start the WebSocket Closing Handshake: "Once an + # endpoint has both sent and received a Close control frame, + # that endpoint SHOULD _Close the WebSocket Connection_" + + # A server closes the TCP connection immediately, while a client + # waits for the server to close the TCP connection. + if self.side is SERVER: + self.send_eof() + + # 1.4. Closing Handshake: "after receiving a control frame + # indicating the connection should be closed, a peer discards + # any further data received." + # RFC 6455 allows reading Ping and Pong frames after a Close frame. + # However, that doesn't seem useful; websockets doesn't support it. + self.parser = self.discard() + next(self.parser) # start coroutine + + else: + # This can't happen because Frame.parse() validates opcodes. + raise AssertionError(f"unexpected opcode: {frame.opcode:02x}") + + self.events.append(frame) + + # Private methods for sending events. 
+ + def send_frame(self, frame: Frame) -> None: + if self.debug: + self.logger.debug("> %s", frame) + self.writes.append( + frame.serialize( + mask=self.side is CLIENT, + extensions=self.extensions, + ) + ) + + def send_eof(self) -> None: + assert not self.eof_sent + self.eof_sent = True + if self.debug: + self.logger.debug("> EOF") + self.writes.append(SEND_EOF) diff --git a/hackaton/lib/python3.12/site-packages/websockets/py.typed b/hackaton/lib/python3.12/site-packages/websockets/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/hackaton/lib/python3.12/site-packages/websockets/server.py b/hackaton/lib/python3.12/site-packages/websockets/server.py new file mode 100644 index 0000000..006d5bd --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/server.py @@ -0,0 +1,587 @@ +from __future__ import annotations + +import base64 +import binascii +import email.utils +import http +import warnings +from typing import Any, Callable, Generator, Sequence, cast + +from .datastructures import Headers, MultipleValuesError +from .exceptions import ( + InvalidHandshake, + InvalidHeader, + InvalidHeaderValue, + InvalidOrigin, + InvalidStatus, + InvalidUpgrade, + NegotiationError, +) +from .extensions import Extension, ServerExtensionFactory +from .headers import ( + build_extension, + parse_connection, + parse_extension, + parse_subprotocol, + parse_upgrade, +) +from .http11 import Request, Response +from .protocol import CONNECTING, OPEN, SERVER, Protocol, State +from .typing import ( + ConnectionOption, + ExtensionHeader, + LoggerLike, + Origin, + StatusLike, + Subprotocol, + UpgradeProtocol, +) +from .utils import accept_key + + +# See #940 for why lazy_import isn't used here for backwards compatibility. +# See #1400 for why listing compatibility imports in __all__ helps PyCharm. +from .legacy.server import * # isort:skip # noqa: I001 +from .legacy.server import __all__ as legacy__all__ + + +__all__ = ["ServerProtocol"] + legacy__all__ + + +class ServerProtocol(Protocol): + """ + Sans-I/O implementation of a WebSocket server connection. + + Args: + origins: Acceptable values of the ``Origin`` header; include + :obj:`None` in the list if the lack of an origin is acceptable. + This is useful for defending against Cross-Site WebSocket + Hijacking attacks. + extensions: List of supported extensions, in order in which they + should be tried. + subprotocols: List of supported subprotocols, in order of decreasing + preference. + select_subprotocol: Callback for selecting a subprotocol among + those supported by the client and the server. It has the same + signature as the :meth:`select_subprotocol` method, including a + :class:`ServerProtocol` instance as first argument. + state: Initial state of the WebSocket connection. + max_size: Maximum size of incoming messages in bytes; + :obj:`None` disables the limit. + logger: Logger for this connection; + defaults to ``logging.getLogger("websockets.server")``; + see the :doc:`logging guide <../../topics/logging>` for details. 
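+
+    A minimal handshake flow, assuming ``request`` is a
+    :class:`~websockets.http11.Request` already parsed from the client
+    (a sketch, not the only way to drive this class)::
+
+        protocol = ServerProtocol()
+        response = protocol.accept(request)
+        protocol.send_response(response)
+        # then write protocol.data_to_send() to the network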
+ + """ + + def __init__( + self, + *, + origins: Sequence[Origin | None] | None = None, + extensions: Sequence[ServerExtensionFactory] | None = None, + subprotocols: Sequence[Subprotocol] | None = None, + select_subprotocol: ( + Callable[ + [ServerProtocol, Sequence[Subprotocol]], + Subprotocol | None, + ] + | None + ) = None, + state: State = CONNECTING, + max_size: int | None = 2**20, + logger: LoggerLike | None = None, + ) -> None: + super().__init__( + side=SERVER, + state=state, + max_size=max_size, + logger=logger, + ) + self.origins = origins + self.available_extensions = extensions + self.available_subprotocols = subprotocols + if select_subprotocol is not None: + # Bind select_subprotocol then shadow self.select_subprotocol. + # Use setattr to work around https://github.com/python/mypy/issues/2427. + setattr( + self, + "select_subprotocol", + select_subprotocol.__get__(self, self.__class__), + ) + + def accept(self, request: Request) -> Response: + """ + Create a handshake response to accept the connection. + + If the handshake request is valid and the handshake successful, + :meth:`accept` returns an HTTP response with status code 101. + + Else, it returns an HTTP response with another status code. This rejects + the connection, like :meth:`reject` would. + + You must send the handshake response with :meth:`send_response`. + + You may modify the response before sending it, typically by adding HTTP + headers. + + Args: + request: WebSocket handshake request received from the client. + + Returns: + WebSocket handshake response or HTTP response to send to the client. + + """ + try: + ( + accept_header, + extensions_header, + protocol_header, + ) = self.process_request(request) + except InvalidOrigin as exc: + request._exception = exc + self.handshake_exc = exc + if self.debug: + self.logger.debug("! invalid origin", exc_info=True) + return self.reject( + http.HTTPStatus.FORBIDDEN, + f"Failed to open a WebSocket connection: {exc}.\n", + ) + except InvalidUpgrade as exc: + request._exception = exc + self.handshake_exc = exc + if self.debug: + self.logger.debug("! invalid upgrade", exc_info=True) + response = self.reject( + http.HTTPStatus.UPGRADE_REQUIRED, + ( + f"Failed to open a WebSocket connection: {exc}.\n" + f"\n" + f"You cannot access a WebSocket server directly " + f"with a browser. You need a WebSocket client.\n" + ), + ) + response.headers["Upgrade"] = "websocket" + return response + except InvalidHandshake as exc: + request._exception = exc + self.handshake_exc = exc + if self.debug: + self.logger.debug("! invalid handshake", exc_info=True) + exc_chain = cast(BaseException, exc) + exc_str = f"{exc_chain}" + while exc_chain.__cause__ is not None: + exc_chain = exc_chain.__cause__ + exc_str += f"; {exc_chain}" + return self.reject( + http.HTTPStatus.BAD_REQUEST, + f"Failed to open a WebSocket connection: {exc_str}.\n", + ) + except Exception as exc: + # Handle exceptions raised by user-provided select_subprotocol and + # unexpected errors. 
+ request._exception = exc + self.handshake_exc = exc + self.logger.error("opening handshake failed", exc_info=True) + return self.reject( + http.HTTPStatus.INTERNAL_SERVER_ERROR, + ( + "Failed to open a WebSocket connection.\n" + "See server log for more information.\n" + ), + ) + + headers = Headers() + + headers["Date"] = email.utils.formatdate(usegmt=True) + + headers["Upgrade"] = "websocket" + headers["Connection"] = "Upgrade" + headers["Sec-WebSocket-Accept"] = accept_header + + if extensions_header is not None: + headers["Sec-WebSocket-Extensions"] = extensions_header + + if protocol_header is not None: + headers["Sec-WebSocket-Protocol"] = protocol_header + + return Response(101, "Switching Protocols", headers) + + def process_request( + self, + request: Request, + ) -> tuple[str, str | None, str | None]: + """ + Check a handshake request and negotiate extensions and subprotocol. + + This function doesn't verify that the request is an HTTP/1.1 or higher + GET request and doesn't check the ``Host`` header. These controls are + usually performed earlier in the HTTP request handling code. They're + the responsibility of the caller. + + Args: + request: WebSocket handshake request received from the client. + + Returns: + ``Sec-WebSocket-Accept``, ``Sec-WebSocket-Extensions``, and + ``Sec-WebSocket-Protocol`` headers for the handshake response. + + Raises: + InvalidHandshake: If the handshake request is invalid; + then the server must return 400 Bad Request error. + + """ + headers = request.headers + + connection: list[ConnectionOption] = sum( + [parse_connection(value) for value in headers.get_all("Connection")], [] + ) + + if not any(value.lower() == "upgrade" for value in connection): + raise InvalidUpgrade( + "Connection", ", ".join(connection) if connection else None + ) + + upgrade: list[UpgradeProtocol] = sum( + [parse_upgrade(value) for value in headers.get_all("Upgrade")], [] + ) + + # For compatibility with non-strict implementations, ignore case when + # checking the Upgrade header. The RFC always uses "websocket", except + # in section 11.2. (IANA registration) where it uses "WebSocket". + if not (len(upgrade) == 1 and upgrade[0].lower() == "websocket"): + raise InvalidUpgrade("Upgrade", ", ".join(upgrade) if upgrade else None) + + try: + key = headers["Sec-WebSocket-Key"] + except KeyError: + raise InvalidHeader("Sec-WebSocket-Key") from None + except MultipleValuesError: + raise InvalidHeader("Sec-WebSocket-Key", "multiple values") from None + + try: + raw_key = base64.b64decode(key.encode(), validate=True) + except binascii.Error as exc: + raise InvalidHeaderValue("Sec-WebSocket-Key", key) from exc + if len(raw_key) != 16: + raise InvalidHeaderValue("Sec-WebSocket-Key", key) + + try: + version = headers["Sec-WebSocket-Version"] + except KeyError: + raise InvalidHeader("Sec-WebSocket-Version") from None + except MultipleValuesError: + raise InvalidHeader("Sec-WebSocket-Version", "multiple values") from None + + if version != "13": + raise InvalidHeaderValue("Sec-WebSocket-Version", version) + + accept_header = accept_key(key) + + self.origin = self.process_origin(headers) + + extensions_header, self.extensions = self.process_extensions(headers) + + protocol_header = self.subprotocol = self.process_subprotocol(headers) + + return ( + accept_header, + extensions_header, + protocol_header, + ) + + def process_origin(self, headers: Headers) -> Origin | None: + """ + Handle the Origin HTTP request header. + + Args: + headers: WebSocket handshake request headers. 
+ + Returns: + origin, if it is acceptable. + + Raises: + InvalidHandshake: If the Origin header is invalid. + InvalidOrigin: If the origin isn't acceptable. + + """ + # "The user agent MUST NOT include more than one Origin header field" + # per https://datatracker.ietf.org/doc/html/rfc6454#section-7.3. + try: + origin = headers.get("Origin") + except MultipleValuesError: + raise InvalidHeader("Origin", "multiple values") from None + if origin is not None: + origin = cast(Origin, origin) + if self.origins is not None: + if origin not in self.origins: + raise InvalidOrigin(origin) + return origin + + def process_extensions( + self, + headers: Headers, + ) -> tuple[str | None, list[Extension]]: + """ + Handle the Sec-WebSocket-Extensions HTTP request header. + + Accept or reject each extension proposed in the client request. + Negotiate parameters for accepted extensions. + + Per :rfc:`6455`, negotiation rules are defined by the specification of + each extension. + + To provide this level of flexibility, for each extension proposed by + the client, we check for a match with each extension available in the + server configuration. If no match is found, the extension is ignored. + + If several variants of the same extension are proposed by the client, + it may be accepted several times, which won't make sense in general. + Extensions must implement their own requirements. For this purpose, + the list of previously accepted extensions is provided. + + This process doesn't allow the server to reorder extensions. It can + only select a subset of the extensions proposed by the client. + + Other requirements, for example related to mandatory extensions or the + order of extensions, may be implemented by overriding this method. + + Args: + headers: WebSocket handshake request headers. + + Returns: + ``Sec-WebSocket-Extensions`` HTTP response header and list of + accepted extensions. + + Raises: + InvalidHandshake: If the Sec-WebSocket-Extensions header is invalid. + + """ + response_header_value: str | None = None + + extension_headers: list[ExtensionHeader] = [] + accepted_extensions: list[Extension] = [] + + header_values = headers.get_all("Sec-WebSocket-Extensions") + + if header_values and self.available_extensions: + parsed_header_values: list[ExtensionHeader] = sum( + [parse_extension(header_value) for header_value in header_values], [] + ) + + for name, request_params in parsed_header_values: + for ext_factory in self.available_extensions: + # Skip non-matching extensions based on their name. + if ext_factory.name != name: + continue + + # Skip non-matching extensions based on their params. + try: + response_params, extension = ext_factory.process_request_params( + request_params, accepted_extensions + ) + except NegotiationError: + continue + + # Add matching extension to the final list. + extension_headers.append((name, response_params)) + accepted_extensions.append(extension) + + # Break out of the loop once we have a match. + break + + # If we didn't break from the loop, no extension in our list + # matched what the client sent. The extension is declined. + + # Serialize extension header. + if extension_headers: + response_header_value = build_extension(extension_headers) + + return response_header_value, accepted_extensions + + def process_subprotocol(self, headers: Headers) -> Subprotocol | None: + """ + Handle the Sec-WebSocket-Protocol HTTP request header. + + Args: + headers: WebSocket handshake request headers. 
+ + Returns: + Subprotocol, if one was selected; this is also the value of the + ``Sec-WebSocket-Protocol`` response header. + + Raises: + InvalidHandshake: If the Sec-WebSocket-Protocol header is invalid. + + """ + subprotocols: Sequence[Subprotocol] = sum( + [ + parse_subprotocol(header_value) + for header_value in headers.get_all("Sec-WebSocket-Protocol") + ], + [], + ) + + return self.select_subprotocol(subprotocols) + + def select_subprotocol( + self, + subprotocols: Sequence[Subprotocol], + ) -> Subprotocol | None: + """ + Pick a subprotocol among those offered by the client. + + If several subprotocols are supported by both the client and the server, + pick the first one in the list declared by the server. + + If the server doesn't support any subprotocols, continue without a + subprotocol, regardless of what the client offers. + + If the server supports at least one subprotocol and the client doesn't + offer any, abort the handshake with an HTTP 400 error. + + You may provide a ``select_subprotocol`` argument to :class:`ServerProtocol` + to override this logic. For example, you could accept the connection + even if the client doesn't offer a subprotocol, rather than reject it. + + Here's how to negotiate the ``chat`` subprotocol if the client supports + it and continue without a subprotocol otherwise:: + + def select_subprotocol(protocol, subprotocols): + if "chat" in subprotocols: + return "chat" + + Args: + subprotocols: List of subprotocols offered by the client. + + Returns: + Selected subprotocol, if a common subprotocol was found. + + :obj:`None` to continue without a subprotocol. + + Raises: + NegotiationError: Custom implementations may raise this exception + to abort the handshake with an HTTP 400 error. + + """ + # Server doesn't offer any subprotocols. + if not self.available_subprotocols: # None or empty list + return None + + # Server offers at least one subprotocol but client doesn't offer any. + if not subprotocols: + raise NegotiationError("missing subprotocol") + + # Server and client both offer subprotocols. Look for a shared one. + proposed_subprotocols = set(subprotocols) + for subprotocol in self.available_subprotocols: + if subprotocol in proposed_subprotocols: + return subprotocol + + # No common subprotocol was found. + raise NegotiationError( + "invalid subprotocol; expected one of " + + ", ".join(self.available_subprotocols) + ) + + def reject(self, status: StatusLike, text: str) -> Response: + """ + Create a handshake response to reject the connection. + + A short plain text response is the best fallback when failing to + establish a WebSocket connection. + + You must send the handshake response with :meth:`send_response`. + + You may modify the response before sending it, for example by changing + HTTP headers. + + Args: + status: HTTP status code. + text: HTTP response body; it will be encoded to UTF-8. + + Returns: + HTTP response to send to the client. + + """ + # If status is an int instead of an HTTPStatus, fix it automatically. + status = http.HTTPStatus(status) + body = text.encode() + headers = Headers( + [ + ("Date", email.utils.formatdate(usegmt=True)), + ("Connection", "close"), + ("Content-Length", str(len(body))), + ("Content-Type", "text/plain; charset=utf-8"), + ] + ) + return Response(status.value, status.phrase, headers, body) + + def send_response(self, response: Response) -> None: + """ + Send a handshake response to the client. + + Args: + response: WebSocket handshake response event to send.
+ + """ + if self.debug: + code, phrase = response.status_code, response.reason_phrase + self.logger.debug("> HTTP/1.1 %d %s", code, phrase) + for key, value in response.headers.raw_items(): + self.logger.debug("> %s: %s", key, value) + if response.body is not None: + self.logger.debug("> [body] (%d bytes)", len(response.body)) + + self.writes.append(response.serialize()) + + if response.status_code == 101: + assert self.state is CONNECTING + self.state = OPEN + self.logger.info("connection open") + + else: + # handshake_exc may be already set if accept() encountered an error. + # If the connection isn't open, set handshake_exc to guarantee that + # handshake_exc is None if and only if opening handshake succeeded. + if self.handshake_exc is None: + self.handshake_exc = InvalidStatus(response) + self.logger.info( + "connection rejected (%d %s)", + response.status_code, + response.reason_phrase, + ) + + self.send_eof() + self.parser = self.discard() + next(self.parser) # start coroutine + + def parse(self) -> Generator[None, None, None]: + if self.state is CONNECTING: + try: + request = yield from Request.parse( + self.reader.read_line, + ) + except Exception as exc: + self.handshake_exc = exc + self.send_eof() + self.parser = self.discard() + next(self.parser) # start coroutine + yield + + if self.debug: + self.logger.debug("< GET %s HTTP/1.1", request.path) + for key, value in request.headers.raw_items(): + self.logger.debug("< %s: %s", key, value) + + self.events.append(request) + + yield from super().parse() + + +class ServerConnection(ServerProtocol): + def __init__(self, *args: Any, **kwargs: Any) -> None: + warnings.warn( # deprecated in 11.0 - 2023-04-02 + "ServerConnection was renamed to ServerProtocol", + DeprecationWarning, + ) + super().__init__(*args, **kwargs) diff --git a/hackaton/lib/python3.12/site-packages/websockets/speedups.c b/hackaton/lib/python3.12/site-packages/websockets/speedups.c new file mode 100644 index 0000000..cb10ded --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/speedups.c @@ -0,0 +1,222 @@ +/* C implementation of performance sensitive functions. */ + +#define PY_SSIZE_T_CLEAN +#include +#include /* uint8_t, uint32_t, uint64_t */ + +#if __ARM_NEON +#include +#elif __SSE2__ +#include +#endif + +static const Py_ssize_t MASK_LEN = 4; + +/* Similar to PyBytes_AsStringAndSize, but accepts more types */ + +static int +_PyBytesLike_AsStringAndSize(PyObject *obj, PyObject **tmp, char **buffer, Py_ssize_t *length) +{ + // This supports bytes, bytearrays, and memoryview objects, + // which are common data structures for handling byte streams. + // If *tmp isn't NULL, the caller gets a new reference. 
+ if (PyBytes_Check(obj)) + { + *tmp = NULL; + *buffer = PyBytes_AS_STRING(obj); + *length = PyBytes_GET_SIZE(obj); + } + else if (PyByteArray_Check(obj)) + { + *tmp = NULL; + *buffer = PyByteArray_AS_STRING(obj); + *length = PyByteArray_GET_SIZE(obj); + } + else if (PyMemoryView_Check(obj)) + { + *tmp = PyMemoryView_GetContiguous(obj, PyBUF_READ, 'C'); + if (*tmp == NULL) + { + return -1; + } + Py_buffer *mv_buf; + mv_buf = PyMemoryView_GET_BUFFER(*tmp); + *buffer = mv_buf->buf; + *length = mv_buf->len; + } + else + { + PyErr_Format( + PyExc_TypeError, + "expected a bytes-like object, %.200s found", + Py_TYPE(obj)->tp_name); + return -1; + } + + return 0; +} + +/* C implementation of websockets.utils.apply_mask */ + +static PyObject * +apply_mask(PyObject *self, PyObject *args, PyObject *kwds) +{ + + // In order to support various bytes-like types, accept any Python object. + + static char *kwlist[] = {"data", "mask", NULL}; + PyObject *input_obj; + PyObject *mask_obj; + + // A pointer to a char * + length will be extracted from the data and mask + // arguments, possibly via a Py_buffer. + + PyObject *input_tmp = NULL; + char *input; + Py_ssize_t input_len; + PyObject *mask_tmp = NULL; + char *mask; + Py_ssize_t mask_len; + + // Initialize a PyBytesObject then get a pointer to the underlying char * + // in order to avoid an extra memory copy in PyBytes_FromStringAndSize. + + PyObject *result = NULL; + char *output; + + // Other variables. + + Py_ssize_t i = 0; + + // Parse inputs. + + if (!PyArg_ParseTupleAndKeywords( + args, kwds, "OO", kwlist, &input_obj, &mask_obj)) + { + goto exit; + } + + if (_PyBytesLike_AsStringAndSize(input_obj, &input_tmp, &input, &input_len) == -1) + { + goto exit; + } + + if (_PyBytesLike_AsStringAndSize(mask_obj, &mask_tmp, &mask, &mask_len) == -1) + { + goto exit; + } + + if (mask_len != MASK_LEN) + { + PyErr_SetString(PyExc_ValueError, "mask must contain 4 bytes"); + goto exit; + } + + // Create output. + + result = PyBytes_FromStringAndSize(NULL, input_len); + if (result == NULL) + { + goto exit; + } + + // Since we just created result, we don't need error checks. + output = PyBytes_AS_STRING(result); + + // Perform the masking operation. + + // Apparently GCC cannot figure out the following optimizations by itself. + + // We need a new scope for MSVC 2010 (non C99 friendly) + { +#if __ARM_NEON + + // With NEON support, XOR by blocks of 16 bytes = 128 bits. + + Py_ssize_t input_len_128 = input_len & ~15; + uint8x16_t mask_128 = vreinterpretq_u8_u32(vdupq_n_u32(*(uint32_t *)mask)); + + for (; i < input_len_128; i += 16) + { + uint8x16_t in_128 = vld1q_u8((uint8_t *)(input + i)); + uint8x16_t out_128 = veorq_u8(in_128, mask_128); + vst1q_u8((uint8_t *)(output + i), out_128); + } + +#elif __SSE2__ + + // With SSE2 support, XOR by blocks of 16 bytes = 128 bits. + + // Since we cannot control the 16-bytes alignment of input and output + // buffers, we rely on loadu/storeu rather than load/store. + + Py_ssize_t input_len_128 = input_len & ~15; + __m128i mask_128 = _mm_set1_epi32(*(uint32_t *)mask); + + for (; i < input_len_128; i += 16) + { + __m128i in_128 = _mm_loadu_si128((__m128i *)(input + i)); + __m128i out_128 = _mm_xor_si128(in_128, mask_128); + _mm_storeu_si128((__m128i *)(output + i), out_128); + } + +#else + + // Without SSE2 support, XOR by blocks of 8 bytes = 64 bits. + + // We assume the memory allocator aligns everything on 8 bytes boundaries. 
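+
+    // For illustration: a 4-byte mask 0x11223344 duplicated into
+    // 0x1122334411223344 lets one 64-bit XOR apply the mask to 8 bytes.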
+ + Py_ssize_t input_len_64 = input_len & ~7; + uint32_t mask_32 = *(uint32_t *)mask; + uint64_t mask_64 = ((uint64_t)mask_32 << 32) | (uint64_t)mask_32; + + for (; i < input_len_64; i += 8) + { + *(uint64_t *)(output + i) = *(uint64_t *)(input + i) ^ mask_64; + } + +#endif + } + + // XOR the remainder of the input byte by byte. + + for (; i < input_len; i++) + { + output[i] = input[i] ^ mask[i & (MASK_LEN - 1)]; + } + +exit: + Py_XDECREF(input_tmp); + Py_XDECREF(mask_tmp); + return result; + +} + +static PyMethodDef speedups_methods[] = { + { + "apply_mask", + (PyCFunction)apply_mask, + METH_VARARGS | METH_KEYWORDS, + "Apply masking to the data of a WebSocket message.", + }, + {NULL, NULL, 0, NULL}, /* Sentinel */ +}; + +static struct PyModuleDef speedups_module = { + PyModuleDef_HEAD_INIT, + "websocket.speedups", /* m_name */ + "C implementation of performance sensitive functions.", + /* m_doc */ + -1, /* m_size */ + speedups_methods, /* m_methods */ + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC +PyInit_speedups(void) +{ + return PyModule_Create(&speedups_module); +} diff --git a/hackaton/lib/python3.12/site-packages/websockets/speedups.cpython-312-darwin.so b/hackaton/lib/python3.12/site-packages/websockets/speedups.cpython-312-darwin.so new file mode 100755 index 0000000..b3ea4b8 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/speedups.cpython-312-darwin.so differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/speedups.pyi b/hackaton/lib/python3.12/site-packages/websockets/speedups.pyi new file mode 100644 index 0000000..821438a --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/speedups.pyi @@ -0,0 +1 @@ +def apply_mask(data: bytes, mask: bytes) -> bytes: ... diff --git a/hackaton/lib/python3.12/site-packages/websockets/streams.py b/hackaton/lib/python3.12/site-packages/websockets/streams.py new file mode 100644 index 0000000..956f139 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/streams.py @@ -0,0 +1,151 @@ +from __future__ import annotations + +from typing import Generator + + +class StreamReader: + """ + Generator-based stream reader. + + This class doesn't support concurrent calls to :meth:`read_line`, + :meth:`read_exact`, or :meth:`read_to_eof`. Make sure calls are + serialized. + + """ + + def __init__(self) -> None: + self.buffer = bytearray() + self.eof = False + + def read_line(self, m: int) -> Generator[None, None, bytes]: + """ + Read a LF-terminated line from the stream. + + This is a generator-based coroutine. + + The return value includes the LF character. + + Args: + m: Maximum number bytes to read; this is a security limit. + + Raises: + EOFError: If the stream ends without a LF. + RuntimeError: If the stream ends in more than ``m`` bytes. + + """ + n = 0 # number of bytes to read + p = 0 # number of bytes without a newline + while True: + n = self.buffer.find(b"\n", p) + 1 + if n > 0: + break + p = len(self.buffer) + if p > m: + raise RuntimeError(f"read {p} bytes, expected no more than {m} bytes") + if self.eof: + raise EOFError(f"stream ends after {p} bytes, before end of line") + yield + if n > m: + raise RuntimeError(f"read {n} bytes, expected no more than {m} bytes") + r = self.buffer[:n] + del self.buffer[:n] + return r + + def read_exact(self, n: int) -> Generator[None, None, bytes]: + """ + Read a given number of bytes from the stream. + + This is a generator-based coroutine. + + Args: + n: How many bytes to read. 
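The type stub above pins down apply_mask's Python-level contract. As a reference for what the vectorized C paths compute, here is a hedged pure-Python sketch of the same operation, each byte of data XORed with the 4-byte mask repeated cyclically; websockets also ships a pure-Python fallback with this behavior, not shown in this hunk:

    from itertools import cycle

    def apply_mask_py(data: bytes, mask: bytes) -> bytes:
        # XOR each byte of data with the mask, cycling through its 4 bytes.
        if len(mask) != 4:
            raise ValueError("mask must contain 4 bytes")
        return bytes(b ^ m for b, m in zip(data, cycle(mask)))

    assert apply_mask_py(b"\x00\xff\x00\xff", b"\x0f\x0f\x0f\x0f") == b"\x0f\xf0\x0f\xf0"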
+ + Raises: + EOFError: If the stream ends in less than ``n`` bytes. + + """ + assert n >= 0 + while len(self.buffer) < n: + if self.eof: + p = len(self.buffer) + raise EOFError(f"stream ends after {p} bytes, expected {n} bytes") + yield + r = self.buffer[:n] + del self.buffer[:n] + return r + + def read_to_eof(self, m: int) -> Generator[None, None, bytes]: + """ + Read all bytes from the stream. + + This is a generator-based coroutine. + + Args: + m: Maximum number bytes to read; this is a security limit. + + Raises: + RuntimeError: If the stream ends in more than ``m`` bytes. + + """ + while not self.eof: + p = len(self.buffer) + if p > m: + raise RuntimeError(f"read {p} bytes, expected no more than {m} bytes") + yield + r = self.buffer[:] + del self.buffer[:] + return r + + def at_eof(self) -> Generator[None, None, bool]: + """ + Tell whether the stream has ended and all data was read. + + This is a generator-based coroutine. + + """ + while True: + if self.buffer: + return False + if self.eof: + return True + # When all data was read but the stream hasn't ended, we can't + # tell until either feed_data() or feed_eof() is called. + yield + + def feed_data(self, data: bytes) -> None: + """ + Write data to the stream. + + :meth:`feed_data` cannot be called after :meth:`feed_eof`. + + Args: + data: Data to write. + + Raises: + EOFError: If the stream has ended. + + """ + if self.eof: + raise EOFError("stream ended") + self.buffer += data + + def feed_eof(self) -> None: + """ + End the stream. + + :meth:`feed_eof` cannot be called more than once. + + Raises: + EOFError: If the stream has ended. + + """ + if self.eof: + raise EOFError("stream ended") + self.eof = True + + def discard(self) -> None: + """ + Discard all buffered data, but don't end the stream.
+ + """ + del self.buffer[:] diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/__init__.py b/hackaton/lib/python3.12/site-packages/websockets/sync/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..6d83ce2 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/client.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/client.cpython-312.pyc new file mode 100644 index 0000000..196314e Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/client.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/connection.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/connection.cpython-312.pyc new file mode 100644 index 0000000..41d3048 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/connection.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/messages.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/messages.cpython-312.pyc new file mode 100644 index 0000000..a46a89e Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/messages.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/server.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/server.cpython-312.pyc new file mode 100644 index 0000000..3cbcd17 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/server.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/utils.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000..fe0dfb6 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/websockets/sync/__pycache__/utils.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/client.py b/hackaton/lib/python3.12/site-packages/websockets/sync/client.py new file mode 100644 index 0000000..d1e20a7 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/sync/client.py @@ -0,0 +1,336 @@ +from __future__ import annotations + +import socket +import ssl as ssl_module +import threading +import warnings +from typing import Any, Sequence + +from ..client import ClientProtocol +from ..datastructures import HeadersLike +from ..extensions.base import ClientExtensionFactory +from ..extensions.permessage_deflate import enable_client_permessage_deflate +from ..headers import validate_subprotocols +from ..http11 import USER_AGENT, Response +from ..protocol import CONNECTING, Event +from ..typing import LoggerLike, Origin, Subprotocol +from ..uri import parse_uri +from .connection import Connection +from .utils import Deadline + + +__all__ = ["connect", "unix_connect", "ClientConnection"] + + +class ClientConnection(Connection): + """ + :mod:`threading` implementation of a WebSocket client connection. 
+ + :class:`ClientConnection` provides :meth:`recv` and :meth:`send` methods for + receiving and sending messages. + + It supports iteration to receive messages:: + + for message in websocket: + process(message) + + The iterator exits normally when the connection is closed with close code + 1000 (OK) or 1001 (going away) or without a close code. It raises a + :exc:`~websockets.exceptions.ConnectionClosedError` when the connection is + closed with any other code. + + Args: + socket: Socket connected to a WebSocket server. + protocol: Sans-I/O connection. + close_timeout: Timeout for closing the connection in seconds. + + """ + + def __init__( + self, + socket: socket.socket, + protocol: ClientProtocol, + *, + close_timeout: float | None = 10, + ) -> None: + self.protocol: ClientProtocol + self.response_rcvd = threading.Event() + super().__init__( + socket, + protocol, + close_timeout=close_timeout, + ) + + def handshake( + self, + additional_headers: HeadersLike | None = None, + user_agent_header: str | None = USER_AGENT, + timeout: float | None = None, + ) -> None: + """ + Perform the opening handshake. + + """ + with self.send_context(expected_state=CONNECTING): + self.request = self.protocol.connect() + if additional_headers is not None: + self.request.headers.update(additional_headers) + if user_agent_header is not None: + self.request.headers["User-Agent"] = user_agent_header + self.protocol.send_request(self.request) + + if not self.response_rcvd.wait(timeout): + raise TimeoutError("timed out during handshake") + + # self.protocol.handshake_exc is always set when the connection is lost + # before receiving a response, when the response cannot be parsed, or + # when the response fails the handshake. + + if self.protocol.handshake_exc is not None: + raise self.protocol.handshake_exc + + def process_event(self, event: Event) -> None: + """ + Process one incoming event. + + """ + # First event - handshake response. + if self.response is None: + assert isinstance(event, Response) + self.response = event + self.response_rcvd.set() + # Later events - frames. + else: + super().process_event(event) + + def recv_events(self) -> None: + """ + Read incoming data from the socket and process events. + + """ + try: + super().recv_events() + finally: + # If the connection is closed during the handshake, unblock it. + self.response_rcvd.set() + + +def connect( + uri: str, + *, + # TCP/TLS + sock: socket.socket | None = None, + ssl: ssl_module.SSLContext | None = None, + server_hostname: str | None = None, + # WebSocket + origin: Origin | None = None, + extensions: Sequence[ClientExtensionFactory] | None = None, + subprotocols: Sequence[Subprotocol] | None = None, + additional_headers: HeadersLike | None = None, + user_agent_header: str | None = USER_AGENT, + compression: str | None = "deflate", + # Timeouts + open_timeout: float | None = 10, + close_timeout: float | None = 10, + # Limits + max_size: int | None = 2**20, + # Logging + logger: LoggerLike | None = None, + # Escape hatch for advanced customization + create_connection: type[ClientConnection] | None = None, + **kwargs: Any, +) -> ClientConnection: + """ + Connect to the WebSocket server at ``uri``. + + This function returns a :class:`ClientConnection` instance, which you can + use to send and receive messages. + + :func:`connect` may be used as a context manager:: + + from websockets.sync.client import connect + + with connect(...) as websocket: + ... + + The connection is closed automatically when exiting the context. 
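Concretely, a minimal client session built on this API might look as follows (the URI is a placeholder for your server):

    from websockets.sync.client import connect

    with connect("ws://localhost:8765") as websocket:
        websocket.send("Hello!")
        print(websocket.recv())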
+ + Args: + uri: URI of the WebSocket server. + sock: Preexisting TCP socket. ``sock`` overrides the host and port + from ``uri``. You may call :func:`socket.create_connection` to + create a suitable TCP socket. + ssl: Configuration for enabling TLS on the connection. + server_hostname: Host name for the TLS handshake. ``server_hostname`` + overrides the host name from ``uri``. + origin: Value of the ``Origin`` header, for servers that require it. + extensions: List of supported extensions, in order in which they + should be negotiated and run. + subprotocols: List of supported subprotocols, in order of decreasing + preference. + additional_headers (HeadersLike | None): Arbitrary HTTP headers to add + to the handshake request. + user_agent_header: Value of the ``User-Agent`` request header. + It defaults to ``"Python/x.y.z websockets/X.Y"``. + Setting it to :obj:`None` removes the header. + compression: The "permessage-deflate" extension is enabled by default. + Set ``compression`` to :obj:`None` to disable it. See the + :doc:`compression guide <../../topics/compression>` for details. + open_timeout: Timeout for opening the connection in seconds. + :obj:`None` disables the timeout. + close_timeout: Timeout for closing the connection in seconds. + :obj:`None` disables the timeout. + max_size: Maximum size of incoming messages in bytes. + :obj:`None` disables the limit. + logger: Logger for this client. + It defaults to ``logging.getLogger("websockets.client")``. + See the :doc:`logging guide <../../topics/logging>` for details. + create_connection: Factory for the :class:`ClientConnection` managing + the connection. Set it to a wrapper or a subclass to customize + connection handling. + + Any other keyword arguments are passed to :func:`~socket.create_connection`. + + Raises: + InvalidURI: If ``uri`` isn't a valid WebSocket URI. + OSError: If the TCP connection fails. + InvalidHandshake: If the opening handshake fails. + TimeoutError: If the opening handshake times out. + + """ + + # Process parameters + + # Backwards compatibility: ssl used to be called ssl_context. + if ssl is None and "ssl_context" in kwargs: + ssl = kwargs.pop("ssl_context") + warnings.warn( # deprecated in 13.0 - 2024-08-20 + "ssl_context was renamed to ssl", + DeprecationWarning, + ) + + wsuri = parse_uri(uri) + if not wsuri.secure and ssl is not None: + raise TypeError("ssl argument is incompatible with a ws:// URI") + + # Private APIs for unix_connect() + unix: bool = kwargs.pop("unix", False) + path: str | None = kwargs.pop("path", None) + + if unix: + if path is None and sock is None: + raise TypeError("missing path argument") + elif path is not None and sock is not None: + raise TypeError("path and sock arguments are incompatible") + + if subprotocols is not None: + validate_subprotocols(subprotocols) + + if compression == "deflate": + extensions = enable_client_permessage_deflate(extensions) + elif compression is not None: + raise ValueError(f"unsupported compression: {compression}") + + # Calculate timeouts on the TCP, TLS, and WebSocket handshakes. + # The TCP and TLS timeouts must be set on the socket, then removed + # to avoid conflicting with the WebSocket timeout in handshake(). 
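The Deadline helper imported from .utils isn't part of this hunk. Judging from its call sites below (Deadline(open_timeout), deadline.timeout(), timeout(raise_if_elapsed=False)), a minimal implementation consistent with this usage shares one countdown across the TCP, TLS, and WebSocket steps; a sketch under those assumptions, not the actual source:

    import time

    class Deadline:
        def __init__(self, timeout: float | None) -> None:
            # None means "no deadline".
            self.deadline = None if timeout is None else time.monotonic() + timeout

        def timeout(self, *, raise_if_elapsed: bool = True) -> float | None:
            if self.deadline is None:
                return None
            remaining = self.deadline - time.monotonic()
            if raise_if_elapsed and remaining <= 0:
                raise TimeoutError("timed out")
            return remaining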
+ deadline = Deadline(open_timeout) + + if create_connection is None: + create_connection = ClientConnection + + try: + # Connect socket + + if sock is None: + if unix: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.settimeout(deadline.timeout()) + assert path is not None # mypy cannot figure this out + sock.connect(path) + else: + kwargs.setdefault("timeout", deadline.timeout()) + sock = socket.create_connection((wsuri.host, wsuri.port), **kwargs) + sock.settimeout(None) + + # Disable Nagle algorithm + + if not unix: + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True) + + # Initialize TLS wrapper and perform TLS handshake + + if wsuri.secure: + if ssl is None: + ssl = ssl_module.create_default_context() + if server_hostname is None: + server_hostname = wsuri.host + sock.settimeout(deadline.timeout()) + sock = ssl.wrap_socket(sock, server_hostname=server_hostname) + sock.settimeout(None) + + # Initialize WebSocket protocol + + protocol = ClientProtocol( + wsuri, + origin=origin, + extensions=extensions, + subprotocols=subprotocols, + max_size=max_size, + logger=logger, + ) + + # Initialize WebSocket connection + + connection = create_connection( + sock, + protocol, + close_timeout=close_timeout, + ) + except Exception: + if sock is not None: + sock.close() + raise + + try: + connection.handshake( + additional_headers, + user_agent_header, + deadline.timeout(), + ) + except Exception: + connection.close_socket() + connection.recv_events_thread.join() + raise + + return connection + + +def unix_connect( + path: str | None = None, + uri: str | None = None, + **kwargs: Any, +) -> ClientConnection: + """ + Connect to a WebSocket server listening on a Unix socket. + + This function accepts the same keyword arguments as :func:`connect`. + + It's only available on Unix. + + It's mainly useful for debugging servers listening on Unix sockets. + + Args: + path: File system path to the Unix socket. + uri: URI of the WebSocket server. ``uri`` defaults to + ``ws://localhost/`` or, when a ``ssl`` is provided, to + ``wss://localhost/``. + + """ + if uri is None: + # Backwards compatibility: ssl used to be called ssl_context. + if kwargs.get("ssl") is None and kwargs.get("ssl_context") is None: + uri = "ws://localhost/" + else: + uri = "wss://localhost/" + return connect(uri=uri, unix=True, path=path, **kwargs) diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/connection.py b/hackaton/lib/python3.12/site-packages/websockets/sync/connection.py new file mode 100644 index 0000000..9758887 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/sync/connection.py @@ -0,0 +1,801 @@ +from __future__ import annotations + +import contextlib +import logging +import random +import socket +import struct +import threading +import uuid +from types import TracebackType +from typing import Any, Iterable, Iterator, Mapping + +from ..exceptions import ( + ConcurrencyError, + ConnectionClosed, + ConnectionClosedOK, + ProtocolError, +) +from ..frames import DATA_OPCODES, BytesLike, CloseCode, Frame, Opcode +from ..http11 import Request, Response +from ..protocol import CLOSED, OPEN, Event, Protocol, State +from ..typing import Data, LoggerLike, Subprotocol +from .messages import Assembler +from .utils import Deadline + + +__all__ = ["Connection"] + + +class Connection: + """ + :mod:`threading` implementation of a WebSocket connection. + + :class:`Connection` provides APIs shared between WebSocket servers and + clients. + + You shouldn't use it directly. 
Instead, use + :class:`~websockets.sync.client.ClientConnection` or + :class:`~websockets.sync.server.ServerConnection`. + + """ + + recv_bufsize = 65536 + + def __init__( + self, + socket: socket.socket, + protocol: Protocol, + *, + close_timeout: float | None = 10, + ) -> None: + self.socket = socket + self.protocol = protocol + self.close_timeout = close_timeout + + # Inject reference to this instance in the protocol's logger. + self.protocol.logger = logging.LoggerAdapter( + self.protocol.logger, + {"websocket": self}, + ) + + # Copy attributes from the protocol for convenience. + self.id: uuid.UUID = self.protocol.id + """Unique identifier of the connection. Useful in logs.""" + self.logger: LoggerLike = self.protocol.logger + """Logger for this connection.""" + self.debug = self.protocol.debug + + # HTTP handshake request and response. + self.request: Request | None = None + """Opening handshake request.""" + self.response: Response | None = None + """Opening handshake response.""" + + # Mutex serializing interactions with the protocol. + self.protocol_mutex = threading.Lock() + + # Assembler turning frames into messages and serializing reads. + self.recv_messages = Assembler() + + # Whether we are busy sending a fragmented message. + self.send_in_progress = False + + # Deadline for the closing handshake. + self.close_deadline: Deadline | None = None + + # Mapping of ping IDs to pong waiters, in chronological order. + self.ping_waiters: dict[bytes, threading.Event] = {} + + # Receiving events from the socket. This thread is marked as daemon to + # allow creating a connection in a non-daemon thread and using it in a + # daemon thread. This mustn't prevent the interpreter from exiting. + self.recv_events_thread = threading.Thread( + target=self.recv_events, + daemon=True, + ) + self.recv_events_thread.start() + + # Exception raised in recv_events, to be chained to ConnectionClosed + # in the user thread in order to show why the TCP connection dropped. + self.recv_exc: BaseException | None = None + + # Public attributes + + @property + def local_address(self) -> Any: + """ + Local address of the connection. + + For IPv4 connections, this is a ``(host, port)`` tuple. + + The format of the address depends on the address family. + See :meth:`~socket.socket.getsockname`. + + """ + return self.socket.getsockname() + + @property + def remote_address(self) -> Any: + """ + Remote address of the connection. + + For IPv4 connections, this is a ``(host, port)`` tuple. + + The format of the address depends on the address family. + See :meth:`~socket.socket.getpeername`. + + """ + return self.socket.getpeername() + + @property + def subprotocol(self) -> Subprotocol | None: + """ + Subprotocol negotiated during the opening handshake. + + :obj:`None` if no subprotocol was negotiated. + + """ + return self.protocol.subprotocol + + # Public methods + + def __enter__(self) -> Connection: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + if exc_type is None: + self.close() + else: + self.close(CloseCode.INTERNAL_ERROR) + + def __iter__(self) -> Iterator[Data]: + """ + Iterate on incoming messages. + + The iterator calls :meth:`recv` and yields messages in an infinite loop. + + It exits when the connection is closed normally. It raises a + :exc:`~websockets.exceptions.ConnectionClosedError` exception after a + protocol error or a network failure. 
+ + """ + try: + while True: + yield self.recv() + except ConnectionClosedOK: + return + + def recv(self, timeout: float | None = None) -> Data: + """ + Receive the next message. + + When the connection is closed, :meth:`recv` raises + :exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it raises + :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal closure + and :exc:`~websockets.exceptions.ConnectionClosedError` after a protocol + error or a network failure. This is how you detect the end of the + message stream. + + If ``timeout`` is :obj:`None`, block until a message is received. If + ``timeout`` is set and no message is received within ``timeout`` + seconds, raise :exc:`TimeoutError`. Set ``timeout`` to ``0`` to check if + a message was already received. + + If the message is fragmented, wait until all fragments are received, + reassemble them, and return the whole message. + + Returns: + A string (:class:`str`) for a Text_ frame or a bytestring + (:class:`bytes`) for a Binary_ frame. + + .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + Raises: + ConnectionClosed: When the connection is closed. + ConcurrencyError: If two threads call :meth:`recv` or + :meth:`recv_streaming` concurrently. + + """ + try: + return self.recv_messages.get(timeout) + except EOFError: + # Wait for the protocol state to be CLOSED before accessing close_exc. + self.recv_events_thread.join() + raise self.protocol.close_exc from self.recv_exc + except ConcurrencyError: + raise ConcurrencyError( + "cannot call recv while another thread " + "is already running recv or recv_streaming" + ) from None + + def recv_streaming(self) -> Iterator[Data]: + """ + Receive the next message frame by frame. + + If the message is fragmented, yield each fragment as it is received. + The iterator must be fully consumed, or else the connection will become + unusable. + + :meth:`recv_streaming` raises the same exceptions as :meth:`recv`. + + Returns: + An iterator of strings (:class:`str`) for a Text_ frame or + bytestrings (:class:`bytes`) for a Binary_ frame. + + .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + Raises: + ConnectionClosed: When the connection is closed. + ConcurrencyError: If two threads call :meth:`recv` or + :meth:`recv_streaming` concurrently. + + """ + try: + for frame in self.recv_messages.get_iter(): + yield frame + except EOFError: + # Wait for the protocol state to be CLOSED before accessing close_exc. + self.recv_events_thread.join() + raise self.protocol.close_exc from self.recv_exc + except ConcurrencyError: + raise ConcurrencyError( + "cannot call recv_streaming while another thread " + "is already running recv or recv_streaming" + ) from None + + def send(self, message: Data | Iterable[Data]) -> None: + """ + Send a message. + + A string (:class:`str`) is sent as a Text_ frame. A bytestring or + bytes-like object (:class:`bytes`, :class:`bytearray`, or + :class:`memoryview`) is sent as a Binary_ frame. + + .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6 + + :meth:`send` also accepts an iterable of strings, bytestrings, or + bytes-like objects to enable fragmentation_. Each item is treated as a + message fragment and sent in its own frame. 
All items must be of the + same type, or else :meth:`send` will raise a :exc:`TypeError` and the + connection will be closed. + + .. _fragmentation: https://datatracker.ietf.org/doc/html/rfc6455#section-5.4 + + :meth:`send` rejects dict-like objects because this is often an error. + (If you really want to send the keys of a dict-like object as fragments, + call its :meth:`~dict.keys` method and pass the result to :meth:`send`.) + + When the connection is closed, :meth:`send` raises + :exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it + raises :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal + connection closure and + :exc:`~websockets.exceptions.ConnectionClosedError` after a protocol + error or a network failure. + + Args: + message: Message to send. + + Raises: + ConnectionClosed: When the connection is closed. + ConcurrencyError: If the connection is sending a fragmented message. + TypeError: If ``message`` doesn't have a supported type. + + """ + # Unfragmented message -- this case must be handled first because + # strings and bytes-like objects are iterable. + + if isinstance(message, str): + with self.send_context(): + if self.send_in_progress: + raise ConcurrencyError( + "cannot call send while another thread " + "is already running send" + ) + self.protocol.send_text(message.encode()) + + elif isinstance(message, BytesLike): + with self.send_context(): + if self.send_in_progress: + raise ConcurrencyError( + "cannot call send while another thread " + "is already running send" + ) + self.protocol.send_binary(message) + + # Catch a common mistake -- passing a dict to send(). + + elif isinstance(message, Mapping): + raise TypeError("data is a dict-like object") + + # Fragmented message -- regular iterator. + + elif isinstance(message, Iterable): + chunks = iter(message) + try: + chunk = next(chunks) + except StopIteration: + return + + try: + # First fragment. + if isinstance(chunk, str): + text = True + with self.send_context(): + if self.send_in_progress: + raise ConcurrencyError( + "cannot call send while another thread " + "is already running send" + ) + self.send_in_progress = True + self.protocol.send_text( + chunk.encode(), + fin=False, + ) + elif isinstance(chunk, BytesLike): + text = False + with self.send_context(): + if self.send_in_progress: + raise ConcurrencyError( + "cannot call send while another thread " + "is already running send" + ) + self.send_in_progress = True + self.protocol.send_binary( + chunk, + fin=False, + ) + else: + raise TypeError("data iterable must contain bytes or str") + + # Other fragments + for chunk in chunks: + if isinstance(chunk, str) and text: + with self.send_context(): + assert self.send_in_progress + self.protocol.send_continuation( + chunk.encode(), + fin=False, + ) + elif isinstance(chunk, BytesLike) and not text: + with self.send_context(): + assert self.send_in_progress + self.protocol.send_continuation( + chunk, + fin=False, + ) + else: + raise TypeError("data iterable must contain uniform types") + + # Final fragment. + with self.send_context(): + self.protocol.send_continuation(b"", fin=True) + self.send_in_progress = False + + except ConcurrencyError: + # We didn't start sending a fragmented message. + # The connection is still usable. + raise + + except Exception: + # We're half-way through a fragmented message and we can't + # complete it. This makes the connection unusable. 
+ with self.send_context(): + self.protocol.fail( + CloseCode.INTERNAL_ERROR, + "error in fragmented message", + ) + raise + + else: + raise TypeError("data must be str, bytes, or iterable") + + def close(self, code: int = CloseCode.NORMAL_CLOSURE, reason: str = "") -> None: + """ + Perform the closing handshake. + + :meth:`close` waits for the other end to complete the handshake, for the + TCP connection to terminate, and for all incoming messages to be read + with :meth:`recv`. + + :meth:`close` is idempotent: it doesn't do anything once the + connection is closed. + + Args: + code: WebSocket close code. + reason: WebSocket close reason. + + """ + try: + # The context manager takes care of waiting for the TCP connection + # to terminate after calling a method that sends a close frame. + with self.send_context(): + if self.send_in_progress: + self.protocol.fail( + CloseCode.INTERNAL_ERROR, + "close during fragmented message", + ) + else: + self.protocol.send_close(code, reason) + except ConnectionClosed: + # Ignore ConnectionClosed exceptions raised from send_context(). + # They mean that the connection is closed, which was the goal. + pass + + def ping(self, data: Data | None = None) -> threading.Event: + """ + Send a Ping_. + + .. _Ping: https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.2 + + A ping may serve as a keepalive or as a check that the remote endpoint + received all messages up to this point + + Args: + data: Payload of the ping. A :class:`str` will be encoded to UTF-8. + If ``data`` is :obj:`None`, the payload is four random bytes. + + Returns: + An event that will be set when the corresponding pong is received. + You can ignore it if you don't intend to wait. + + :: + + pong_event = ws.ping() + pong_event.wait() # only if you want to wait for the pong + + Raises: + ConnectionClosed: When the connection is closed. + ConcurrencyError: If another ping was sent with the same data and + the corresponding pong wasn't received yet. + + """ + if isinstance(data, BytesLike): + data = bytes(data) + elif isinstance(data, str): + data = data.encode() + elif data is not None: + raise TypeError("data must be str or bytes-like") + + with self.send_context(): + # Protect against duplicates if a payload is explicitly set. + if data in self.ping_waiters: + raise ConcurrencyError("already waiting for a pong with the same data") + + # Generate a unique random payload otherwise. + while data is None or data in self.ping_waiters: + data = struct.pack("!I", random.getrandbits(32)) + + pong_waiter = threading.Event() + self.ping_waiters[data] = pong_waiter + self.protocol.send_ping(data) + return pong_waiter + + def pong(self, data: Data = b"") -> None: + """ + Send a Pong_. + + .. _Pong: https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.3 + + An unsolicited pong may serve as a unidirectional heartbeat. + + Args: + data: Payload of the pong. A :class:`str` will be encoded to UTF-8. + + Raises: + ConnectionClosed: When the connection is closed. + + """ + if isinstance(data, BytesLike): + data = bytes(data) + elif isinstance(data, str): + data = data.encode() + else: + raise TypeError("data must be str or bytes-like") + + with self.send_context(): + self.protocol.send_pong(data) + + # Private methods + + def process_event(self, event: Event) -> None: + """ + Process one incoming event. + + This method is overridden in subclasses to handle the handshake. 
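Because ping() hands back a threading.Event, application code can measure round-trip time directly; a sketch, where websocket stands for any open connection:

    import time

    t0 = time.monotonic()
    pong_event = websocket.ping()
    if pong_event.wait(5):  # wait at most 5 seconds for the matching pong
        print(f"round trip: {time.monotonic() - t0:.3f}s")
    else:
        print("no pong received within 5 seconds")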
+ + """ + assert isinstance(event, Frame) + if event.opcode in DATA_OPCODES: + self.recv_messages.put(event) + + if event.opcode is Opcode.PONG: + self.acknowledge_pings(bytes(event.data)) + + def acknowledge_pings(self, data: bytes) -> None: + """ + Acknowledge pings when receiving a pong. + + """ + with self.protocol_mutex: + # Ignore unsolicited pong. + if data not in self.ping_waiters: + return + # Sending a pong for only the most recent ping is legal. + # Acknowledge all previous pings too in that case. + ping_id = None + ping_ids = [] + for ping_id, ping in self.ping_waiters.items(): + ping_ids.append(ping_id) + ping.set() + if ping_id == data: + break + else: + raise AssertionError("solicited pong not found in pings") + # Remove acknowledged pings from self.ping_waiters. + for ping_id in ping_ids: + del self.ping_waiters[ping_id] + + def recv_events(self) -> None: + """ + Read incoming data from the socket and process events. + + Run this method in a thread as long as the connection is alive. + + ``recv_events()`` exits immediately when the ``self.socket`` is closed. + + """ + try: + while True: + try: + if self.close_deadline is not None: + self.socket.settimeout(self.close_deadline.timeout()) + data = self.socket.recv(self.recv_bufsize) + except Exception as exc: + if self.debug: + self.logger.debug("error while receiving data", exc_info=True) + # When the closing handshake is initiated by our side, + # recv() may block until send_context() closes the socket. + # In that case, send_context() already set recv_exc. + # Calling set_recv_exc() avoids overwriting it. + with self.protocol_mutex: + self.set_recv_exc(exc) + break + + if data == b"": + break + + # Acquire the connection lock. + with self.protocol_mutex: + # Feed incoming data to the protocol. + self.protocol.receive_data(data) + + # This isn't expected to raise an exception. + events = self.protocol.events_received() + + # Write outgoing data to the socket. + try: + self.send_data() + except Exception as exc: + if self.debug: + self.logger.debug("error while sending data", exc_info=True) + # Similarly to the above, avoid overriding an exception + # set by send_context(), in case of a race condition + # i.e. send_context() closes the socket after recv() + # returns above but before send_data() calls send(). + self.set_recv_exc(exc) + break + + if self.protocol.close_expected(): + # If the connection is expected to close soon, set the + # close deadline based on the close timeout. + if self.close_deadline is None: + self.close_deadline = Deadline(self.close_timeout) + + # Unlock conn_mutex before processing events. Else, the + # application can't send messages in response to events. + + # If self.send_data raised an exception, then events are lost. + # Given that automatic responses write small amounts of data, + # this should be uncommon, so we don't handle the edge case. + + try: + for event in events: + # This may raise EOFError if the closing handshake + # times out while a message is waiting to be read. + self.process_event(event) + except EOFError: + break + + # Breaking out of the while True: ... loop means that we believe + # that the socket doesn't work anymore. + with self.protocol_mutex: + # Feed the end of the data stream to the protocol. + self.protocol.receive_eof() + + # This isn't expected to generate events. + assert not self.protocol.events_received() + + # There is no error handling because send_data() can only write + # the end of the data stream here and it handles errors itself. 
+ self.send_data() + + except Exception as exc: + # This branch should never run. It's a safety net in case of bugs. + self.logger.error("unexpected internal error", exc_info=True) + with self.protocol_mutex: + self.set_recv_exc(exc) + finally: + # This isn't expected to raise an exception. + self.close_socket() + + @contextlib.contextmanager + def send_context( + self, + *, + expected_state: State = OPEN, # CONNECTING during the opening handshake + ) -> Iterator[None]: + """ + Create a context for writing to the connection from user code. + + On entry, :meth:`send_context` acquires the connection lock and checks + that the connection is open; on exit, it writes outgoing data to the + socket:: + + with self.send_context(): + self.protocol.send_text(message.encode()) + + When the connection isn't open on entry, when the connection is expected + to close on exit, or when an unexpected error happens, terminating the + connection, :meth:`send_context` waits until the connection is closed + then raises :exc:`~websockets.exceptions.ConnectionClosed`. + + """ + # Should we wait until the connection is closed? + wait_for_close = False + # Should we close the socket and raise ConnectionClosed? + raise_close_exc = False + # What exception should we chain ConnectionClosed to? + original_exc: BaseException | None = None + + # Acquire the protocol lock. + with self.protocol_mutex: + if self.protocol.state is expected_state: + # Let the caller interact with the protocol. + try: + yield + except (ProtocolError, ConcurrencyError): + # The protocol state wasn't changed. Exit immediately. + raise + except Exception as exc: + self.logger.error("unexpected internal error", exc_info=True) + # This branch should never run. It's a safety net in case of + # bugs. Since we don't know what happened, we will close the + # connection and raise the exception to the caller. + wait_for_close = False + raise_close_exc = True + original_exc = exc + else: + # Check if the connection is expected to close soon. + if self.protocol.close_expected(): + wait_for_close = True + # If the connection is expected to close soon, set the + # close deadline based on the close timeout. + # Since we tested earlier that protocol.state was OPEN + # (or CONNECTING) and we didn't release protocol_mutex, + # it is certain that self.close_deadline is still None. + assert self.close_deadline is None + self.close_deadline = Deadline(self.close_timeout) + # Write outgoing data to the socket. + try: + self.send_data() + except Exception as exc: + if self.debug: + self.logger.debug("error while sending data", exc_info=True) + # While the only expected exception here is OSError, + # other exceptions would be treated identically. + wait_for_close = False + raise_close_exc = True + original_exc = exc + + else: # self.protocol.state is not expected_state + # Minor layering violation: we assume that the connection + # will be closing soon if it isn't in the expected state. + wait_for_close = True + raise_close_exc = True + + # To avoid a deadlock, release the connection lock by exiting the + # context manager before waiting for recv_events() to terminate. + + # If the connection is expected to close soon and the close timeout + # elapses, close the socket to terminate the connection. + if wait_for_close: + if self.close_deadline is None: + timeout = self.close_timeout + else: + # Thread.join() returns immediately if timeout is negative. 
+ timeout = self.close_deadline.timeout(raise_if_elapsed=False) + self.recv_events_thread.join(timeout) + + if self.recv_events_thread.is_alive(): + # There's no risk to overwrite another error because + # original_exc is never set when wait_for_close is True. + assert original_exc is None + original_exc = TimeoutError("timed out while closing connection") + # Set recv_exc before closing the socket in order to get + # proper exception reporting. + raise_close_exc = True + with self.protocol_mutex: + self.set_recv_exc(original_exc) + + # If an error occurred, close the socket to terminate the connection and + # raise an exception. + if raise_close_exc: + self.close_socket() + # Wait for the protocol state to be CLOSED before accessing close_exc. + self.recv_events_thread.join() + raise self.protocol.close_exc from original_exc + + def send_data(self) -> None: + """ + Send outgoing data. + + This method requires holding protocol_mutex. + + Raises: + OSError: When a socket operation fails. + + """ + assert self.protocol_mutex.locked() + for data in self.protocol.data_to_send(): + if data: + if self.close_deadline is not None: + self.socket.settimeout(self.close_deadline.timeout()) + self.socket.sendall(data) + else: + try: + self.socket.shutdown(socket.SHUT_WR) + except OSError: # socket already closed + pass + + def set_recv_exc(self, exc: BaseException | None) -> None: + """ + Set recv_exc, if not set yet. + + This method requires holding protocol_mutex. + + """ + assert self.protocol_mutex.locked() + if self.recv_exc is None: # pragma: no branch + self.recv_exc = exc + + def close_socket(self) -> None: + """ + Shutdown and close socket. Close message assembler. + + Calling close_socket() guarantees that recv_events() terminates. Indeed, + recv_events() may block only on socket.recv() or on recv_messages.put(). + + """ + # shutdown() is required to interrupt recv() on Linux. + try: + self.socket.shutdown(socket.SHUT_RDWR) + except OSError: + pass # socket is already closed + self.socket.close() + + # Calling protocol.receive_eof() is safe because it's idempotent. + # This guarantees that the protocol state becomes CLOSED. + self.protocol.receive_eof() + assert self.protocol.state is CLOSED + + # Abort recv() with a ConnectionClosed exception. + self.recv_messages.close() diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/messages.py b/hackaton/lib/python3.12/site-packages/websockets/sync/messages.py new file mode 100644 index 0000000..8d09053 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/sync/messages.py @@ -0,0 +1,283 @@ +from __future__ import annotations + +import codecs +import queue +import threading +from typing import Iterator, cast + +from ..exceptions import ConcurrencyError +from ..frames import OP_BINARY, OP_CONT, OP_TEXT, Frame +from ..typing import Data + + +__all__ = ["Assembler"] + +UTF8Decoder = codecs.getincrementaldecoder("utf-8") + + +class Assembler: + """ + Assemble messages from frames. + + """ + + def __init__(self) -> None: + # Serialize reads and writes -- except for reads via synchronization + # primitives provided by the threading and queue modules. + self.mutex = threading.Lock() + + # We create a latch with two events to synchronize the production of + # frames and the consumption of messages (or frames) without a buffer. + # This design requires a switch between the library thread and the user + # thread for each message; that shouldn't be a performance bottleneck.
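The two-event latch described in these comments can be seen in isolation in the following sketch: one event signals that a message is ready, the other acknowledges that it was fetched, forcing the producing and consuming threads to alternate:

    import threading

    message_complete = threading.Event()
    message_fetched = threading.Event()

    def put_side() -> None:  # plays the role of the library thread
        message_complete.set()   # publish: a message is ready
        message_fetched.wait()   # block until the consumer confirms
        message_fetched.clear()

    def get_side() -> None:  # plays the role of the user thread
        message_complete.wait()  # block until a message is ready
        message_complete.clear()
        message_fetched.set()    # confirm the hand-off, unblocking put_side

    t = threading.Thread(target=put_side)
    t.start()
    get_side()
    t.join()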
+ + # put() sets this event to tell get() that a message can be fetched. + self.message_complete = threading.Event() + # get() sets this event to let put() know that the message was fetched. + self.message_fetched = threading.Event() + + # This flag prevents concurrent calls to get() by user code. + self.get_in_progress = False + # This flag prevents concurrent calls to put() by library code. + self.put_in_progress = False + + # Decoder for text frames, None for binary frames. + self.decoder: codecs.IncrementalDecoder | None = None + + # Buffer of frames belonging to the same message. + self.chunks: list[Data] = [] + + # When switching from "buffering" to "streaming", we use a thread-safe + # queue for transferring frames from the writing thread (library code) + # to the reading thread (user code). We're buffering when chunks_queue + # is None and streaming when it's a SimpleQueue. None is a sentinel + # value marking the end of the message, superseding message_complete. + + # Stream data from frames belonging to the same message. + self.chunks_queue: queue.SimpleQueue[Data | None] | None = None + + # This flag marks the end of the connection. + self.closed = False + + def get(self, timeout: float | None = None) -> Data: + """ + Read the next message. + + :meth:`get` returns a single :class:`str` or :class:`bytes`. + + If the message is fragmented, :meth:`get` waits until the last frame is + received, then it reassembles the message and returns it. To receive + messages frame by frame, use :meth:`get_iter` instead. + + Args: + timeout: If a timeout is provided and elapses before a complete + message is received, :meth:`get` raises :exc:`TimeoutError`. + + Raises: + EOFError: If the stream of frames has ended. + ConcurrencyError: If two threads run :meth:`get` or :meth:`get_iter` + concurrently. + TimeoutError: If a timeout is provided and elapses before a + complete message is received. + + """ + with self.mutex: + if self.closed: + raise EOFError("stream of frames ended") + + if self.get_in_progress: + raise ConcurrencyError("get() or get_iter() is already running") + + self.get_in_progress = True + + # If the message_complete event isn't set yet, release the lock to + # allow put() to run and eventually set it. + # Locking with get_in_progress ensures only one thread can get here. + completed = self.message_complete.wait(timeout) + + with self.mutex: + self.get_in_progress = False + + # Waiting for a complete message timed out. + if not completed: + raise TimeoutError(f"timed out in {timeout:.1f}s") + + # get() was unblocked by close() rather than put(). + if self.closed: + raise EOFError("stream of frames ended") + + assert self.message_complete.is_set() + self.message_complete.clear() + + joiner: Data = b"" if self.decoder is None else "" + # mypy cannot figure out that chunks have the proper type. + message: Data = joiner.join(self.chunks) # type: ignore + + self.chunks = [] + assert self.chunks_queue is None + + assert not self.message_fetched.is_set() + self.message_fetched.set() + + return message + + def get_iter(self) -> Iterator[Data]: + """ + Stream the next message. + + Iterating the return value of :meth:`get_iter` yields a :class:`str` or + :class:`bytes` for each frame in the message. + + The iterator must be fully consumed before calling :meth:`get_iter` or + :meth:`get` again. Else, :exc:`ConcurrencyError` is raised. + + This method only makes sense for fragmented messages. If messages aren't + fragmented, use :meth:`get` instead.
+ + Raises: + EOFError: If the stream of frames has ended. + ConcurrencyError: If two threads run :meth:`get` or :meth:`get_iter` + concurrently. + + """ + with self.mutex: + if self.closed: + raise EOFError("stream of frames ended") + + if self.get_in_progress: + raise ConcurrencyError("get() or get_iter() is already running") + + chunks = self.chunks + self.chunks = [] + self.chunks_queue = cast( + # Remove quotes around type when dropping Python < 3.9. + "queue.SimpleQueue[Data | None]", + queue.SimpleQueue(), + ) + + # Sending None in chunk_queue supersedes setting message_complete + # when switching to "streaming". If message is already complete + # when the switch happens, put() didn't send None, so we have to. + if self.message_complete.is_set(): + self.chunks_queue.put(None) + + self.get_in_progress = True + + # Locking with get_in_progress ensures only one thread can get here. + chunk: Data | None + for chunk in chunks: + yield chunk + while (chunk := self.chunks_queue.get()) is not None: + yield chunk + + with self.mutex: + self.get_in_progress = False + + # get_iter() was unblocked by close() rather than put(). + if self.closed: + raise EOFError("stream of frames ended") + + assert self.message_complete.is_set() + self.message_complete.clear() + + assert self.chunks == [] + self.chunks_queue = None + + assert not self.message_fetched.is_set() + self.message_fetched.set() + + def put(self, frame: Frame) -> None: + """ + Add ``frame`` to the next message. + + When ``frame`` is the final frame in a message, :meth:`put` waits until + the message is fetched, which can be achieved by calling :meth:`get` or + by fully consuming the return value of :meth:`get_iter`. + + :meth:`put` assumes that the stream of frames respects the protocol. If + it doesn't, the behavior is undefined. + + Raises: + EOFError: If the stream of frames has ended. + ConcurrencyError: If two threads run :meth:`put` concurrently. + + """ + with self.mutex: + if self.closed: + raise EOFError("stream of frames ended") + + if self.put_in_progress: + raise ConcurrencyError("put is already running") + + if frame.opcode is OP_TEXT: + self.decoder = UTF8Decoder(errors="strict") + elif frame.opcode is OP_BINARY: + self.decoder = None + else: + assert frame.opcode is OP_CONT + + data: Data + if self.decoder is not None: + data = self.decoder.decode(frame.data, frame.fin) + else: + data = frame.data + + if self.chunks_queue is None: + self.chunks.append(data) + else: + self.chunks_queue.put(data) + + if not frame.fin: + return + + # Message is complete. Wait until it's fetched to return. + + assert not self.message_complete.is_set() + self.message_complete.set() + + if self.chunks_queue is not None: + self.chunks_queue.put(None) + + assert not self.message_fetched.is_set() + + self.put_in_progress = True + + # Release the lock to allow get() to run and eventually set the event. + # Locking with put_in_progress ensures only one thread can get here. + self.message_fetched.wait() + + with self.mutex: + self.put_in_progress = False + + # put() was unblocked by close() rather than get() or get_iter(). + if self.closed: + raise EOFError("stream of frames ended") + + assert self.message_fetched.is_set() + self.message_fetched.clear() + + self.decoder = None + + def close(self) -> None: + """ + End the stream of frames. + + Calling :meth:`close` concurrently with :meth:`get`, :meth:`get_iter`, + or :meth:`put` is safe. They will raise :exc:`EOFError`.
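Putting get() and put() together, the hand-off between a library thread and a user thread looks like this; Frame and the opcodes come from websockets.frames, as in the imports above:

    import threading
    from websockets.frames import OP_CONT, OP_TEXT, Frame
    from websockets.sync.messages import Assembler

    assembler = Assembler()

    def library_side() -> None:
        # Two frames of one fragmented text message.
        assembler.put(Frame(OP_TEXT, b"hello, ", fin=False))
        assembler.put(Frame(OP_CONT, b"world", fin=True))  # blocks until fetched

    t = threading.Thread(target=library_side)
    t.start()
    print(assembler.get())  # "hello, world"
    t.join()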
+ + """ + with self.mutex: + if self.closed: + return + + self.closed = True + + # Unblock get or get_iter. + if self.get_in_progress: + self.message_complete.set() + if self.chunks_queue is not None: + self.chunks_queue.put(None) + + # Unblock put(). + if self.put_in_progress: + self.message_fetched.set() diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/server.py b/hackaton/lib/python3.12/site-packages/websockets/sync/server.py new file mode 100644 index 0000000..1b7cbb4 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/sync/server.py @@ -0,0 +1,727 @@ +from __future__ import annotations + +import hmac +import http +import logging +import os +import selectors +import socket +import ssl as ssl_module +import sys +import threading +import warnings +from types import TracebackType +from typing import Any, Callable, Iterable, Sequence, Tuple, cast + +from ..exceptions import InvalidHeader +from ..extensions.base import ServerExtensionFactory +from ..extensions.permessage_deflate import enable_server_permessage_deflate +from ..frames import CloseCode +from ..headers import ( + build_www_authenticate_basic, + parse_authorization_basic, + validate_subprotocols, +) +from ..http11 import SERVER, Request, Response +from ..protocol import CONNECTING, OPEN, Event +from ..server import ServerProtocol +from ..typing import LoggerLike, Origin, StatusLike, Subprotocol +from .connection import Connection +from .utils import Deadline + + +__all__ = ["serve", "unix_serve", "ServerConnection", "Server", "basic_auth"] + + +class ServerConnection(Connection): + """ + :mod:`threading` implementation of a WebSocket server connection. + + :class:`ServerConnection` provides :meth:`recv` and :meth:`send` methods for + receiving and sending messages. + + It supports iteration to receive messages:: + + for message in websocket: + process(message) + + The iterator exits normally when the connection is closed with close code + 1000 (OK) or 1001 (going away) or without a close code. It raises a + :exc:`~websockets.exceptions.ConnectionClosedError` when the connection is + closed with any other code. + + Args: + socket: Socket connected to a WebSocket client. + protocol: Sans-I/O connection. + close_timeout: Timeout for closing the connection in seconds. + + """ + + def __init__( + self, + socket: socket.socket, + protocol: ServerProtocol, + *, + close_timeout: float | None = 10, + ) -> None: + self.protocol: ServerProtocol + self.request_rcvd = threading.Event() + super().__init__( + socket, + protocol, + close_timeout=close_timeout, + ) + self.username: str # see basic_auth() + + def respond(self, status: StatusLike, text: str) -> Response: + """ + Create a plain text HTTP response. + + ``process_request`` and ``process_response`` may call this method to + return an HTTP response instead of performing the WebSocket opening + handshake. + + You can modify the response before returning it, for example by changing + HTTP headers. + + Args: + status: HTTP status code. + text: HTTP response body; it will be encoded to UTF-8. + + Returns: + HTTP response to send to the client. 
+ + """ + return self.protocol.reject(status, text) + + def handshake( + self, + process_request: ( + Callable[ + [ServerConnection, Request], + Response | None, + ] + | None + ) = None, + process_response: ( + Callable[ + [ServerConnection, Request, Response], + Response | None, + ] + | None + ) = None, + server_header: str | None = SERVER, + timeout: float | None = None, + ) -> None: + """ + Perform the opening handshake. + + """ + if not self.request_rcvd.wait(timeout): + raise TimeoutError("timed out during handshake") + + if self.request is not None: + with self.send_context(expected_state=CONNECTING): + response = None + + if process_request is not None: + try: + response = process_request(self, self.request) + except Exception as exc: + self.protocol.handshake_exc = exc + response = self.protocol.reject( + http.HTTPStatus.INTERNAL_SERVER_ERROR, + ( + "Failed to open a WebSocket connection.\n" + "See server log for more information.\n" + ), + ) + + if response is None: + self.response = self.protocol.accept(self.request) + else: + self.response = response + + if server_header: + self.response.headers["Server"] = server_header + + response = None + + if process_response is not None: + try: + response = process_response(self, self.request, self.response) + except Exception as exc: + self.protocol.handshake_exc = exc + response = self.protocol.reject( + http.HTTPStatus.INTERNAL_SERVER_ERROR, + ( + "Failed to open a WebSocket connection.\n" + "See server log for more information.\n" + ), + ) + + if response is not None: + self.response = response + + self.protocol.send_response(self.response) + + # self.protocol.handshake_exc is always set when the connection is lost + # before receiving a request, when the request cannot be parsed, when + # the handshake encounters an error, or when process_request or + # process_response sends an HTTP response that rejects the handshake. + + if self.protocol.handshake_exc is not None: + raise self.protocol.handshake_exc + + def process_event(self, event: Event) -> None: + """ + Process one incoming event. + + """ + # First event - handshake request. + if self.request is None: + assert isinstance(event, Request) + self.request = event + self.request_rcvd.set() + # Later events - frames. + else: + super().process_event(event) + + def recv_events(self) -> None: + """ + Read incoming data from the socket and process events. + + """ + try: + super().recv_events() + finally: + # If the connection is closed during the handshake, unblock it. + self.request_rcvd.set() + + +class Server: + """ + WebSocket server returned by :func:`serve`. + + This class mirrors the API of :class:`~socketserver.BaseServer`, notably the + :meth:`~socketserver.BaseServer.serve_forever` and + :meth:`~socketserver.BaseServer.shutdown` methods, as well as the context + manager protocol. + + Args: + socket: Server socket listening for new connections. + handler: Handler for one connection. Receives the socket and address + returned by :meth:`~socket.socket.accept`. + logger: Logger for this server. + It defaults to ``logging.getLogger("websockets.server")``. + See the :doc:`logging guide <../../topics/logging>` for details. 
+ + """ + + def __init__( + self, + socket: socket.socket, + handler: Callable[[socket.socket, Any], None], + logger: LoggerLike | None = None, + ) -> None: + self.socket = socket + self.handler = handler + if logger is None: + logger = logging.getLogger("websockets.server") + self.logger = logger + if sys.platform != "win32": + self.shutdown_watcher, self.shutdown_notifier = os.pipe() + + def serve_forever(self) -> None: + """ + See :meth:`socketserver.BaseServer.serve_forever`. + + This method doesn't return. Calling :meth:`shutdown` from another thread + stops the server. + + Typical use:: + + with serve(...) as server: + server.serve_forever() + + """ + poller = selectors.DefaultSelector() + try: + poller.register(self.socket, selectors.EVENT_READ) + except ValueError: # pragma: no cover + # If shutdown() is called before poller.register(), + # the socket is closed and poller.register() raises + # ValueError: Invalid file descriptor: -1 + return + if sys.platform != "win32": + poller.register(self.shutdown_watcher, selectors.EVENT_READ) + + while True: + poller.select() + try: + # If the socket is closed, this will raise an exception and exit + # the loop. So we don't need to check the return value of select(). + sock, addr = self.socket.accept() + except OSError: + break + # Since there isn't a mechanism for tracking connections and waiting + # for them to terminate, we cannot use daemon threads, or else all + # connections would be terminate brutally when closing the server. + thread = threading.Thread(target=self.handler, args=(sock, addr)) + thread.start() + + def shutdown(self) -> None: + """ + See :meth:`socketserver.BaseServer.shutdown`. + + """ + self.socket.close() + if sys.platform != "win32": + os.write(self.shutdown_notifier, b"x") + + def fileno(self) -> int: + """ + See :meth:`socketserver.BaseServer.fileno`. 
+ + """ + return self.socket.fileno() + + def __enter__(self) -> Server: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.shutdown() + + +def __getattr__(name: str) -> Any: + if name == "WebSocketServer": + warnings.warn( # deprecated in 13.0 - 2024-08-20 + "WebSocketServer was renamed to Server", + DeprecationWarning, + ) + return Server + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +def serve( + handler: Callable[[ServerConnection], None], + host: str | None = None, + port: int | None = None, + *, + # TCP/TLS + sock: socket.socket | None = None, + ssl: ssl_module.SSLContext | None = None, + # WebSocket + origins: Sequence[Origin | None] | None = None, + extensions: Sequence[ServerExtensionFactory] | None = None, + subprotocols: Sequence[Subprotocol] | None = None, + select_subprotocol: ( + Callable[ + [ServerConnection, Sequence[Subprotocol]], + Subprotocol | None, + ] + | None + ) = None, + process_request: ( + Callable[ + [ServerConnection, Request], + Response | None, + ] + | None + ) = None, + process_response: ( + Callable[ + [ServerConnection, Request, Response], + Response | None, + ] + | None + ) = None, + server_header: str | None = SERVER, + compression: str | None = "deflate", + # Timeouts + open_timeout: float | None = 10, + close_timeout: float | None = 10, + # Limits + max_size: int | None = 2**20, + # Logging + logger: LoggerLike | None = None, + # Escape hatch for advanced customization + create_connection: type[ServerConnection] | None = None, + **kwargs: Any, +) -> Server: + """ + Create a WebSocket server listening on ``host`` and ``port``. + + Whenever a client connects, the server creates a :class:`ServerConnection`, + performs the opening handshake, and delegates to the ``handler``. + + The handler receives the :class:`ServerConnection` instance, which you can + use to send and receive messages. + + Once the handler completes, either normally or with an exception, the server + performs the closing handshake and closes the connection. + + This function returns a :class:`Server` whose API mirrors + :class:`~socketserver.BaseServer`. Treat it as a context manager to ensure + that it will be closed and call :meth:`~Server.serve_forever` to serve + requests:: + + from websockets.sync.server import serve + + def handler(websocket): + ... + + with serve(handler, ...) as server: + server.serve_forever() + + Args: + handler: Connection handler. It receives the WebSocket connection, + which is a :class:`ServerConnection`, in argument. + host: Network interfaces the server binds to. + See :func:`~socket.create_server` for details. + port: TCP port the server listens on. + See :func:`~socket.create_server` for details. + sock: Preexisting TCP socket. ``sock`` replaces ``host`` and ``port``. + You may call :func:`socket.create_server` to create a suitable TCP + socket. + ssl: Configuration for enabling TLS on the connection. + origins: Acceptable values of the ``Origin`` header, for defending + against Cross-Site WebSocket Hijacking attacks. Include :obj:`None` + in the list if the lack of an origin is acceptable. + extensions: List of supported extensions, in order in which they + should be negotiated and run. + subprotocols: List of supported subprotocols, in order of decreasing + preference. + select_subprotocol: Callback for selecting a subprotocol among + those supported by the client and the server. 
It receives a
+            :class:`ServerConnection` (not a
+            :class:`~websockets.server.ServerProtocol`!) instance and a list of
+            subprotocols offered by the client. Other than the first argument,
+            it has the same behavior as the
+            :meth:`ServerProtocol.select_subprotocol
+            <websockets.server.ServerProtocol.select_subprotocol>` method.
+        process_request: Intercept the request during the opening handshake.
+            Return an HTTP response to force the response. Return :obj:`None` to
+            continue normally. When you force an HTTP 101 (Switching Protocols)
+            response, the handshake is successful. Else, the connection is
+            aborted.
+        process_response: Intercept the response during the opening handshake.
+            Modify the response or return a new HTTP response to force the
+            response. Return :obj:`None` to continue normally. When you force an
+            HTTP 101 (Switching Protocols) response, the handshake is
+            successful. Else, the connection is aborted.
+        server_header: Value of the ``Server`` response header.
+            It defaults to ``"Python/x.y.z websockets/X.Y"``. Setting it to
+            :obj:`None` removes the header.
+        compression: The "permessage-deflate" extension is enabled by default.
+            Set ``compression`` to :obj:`None` to disable it. See the
+            :doc:`compression guide <../../topics/compression>` for details.
+        open_timeout: Timeout for opening connections in seconds.
+            :obj:`None` disables the timeout.
+        close_timeout: Timeout for closing connections in seconds.
+            :obj:`None` disables the timeout.
+        max_size: Maximum size of incoming messages in bytes.
+            :obj:`None` disables the limit.
+        logger: Logger for this server.
+            It defaults to ``logging.getLogger("websockets.server")``. See the
+            :doc:`logging guide <../../topics/logging>` for details.
+        create_connection: Factory for the :class:`ServerConnection` managing
+            the connection. Set it to a wrapper or a subclass to customize
+            connection handling.
+
+    Any other keyword arguments are passed to :func:`~socket.create_server`.
+
+    """
+
+    # Process parameters
+
+    # Backwards compatibility: ssl used to be called ssl_context.
+    if ssl is None and "ssl_context" in kwargs:
+        ssl = kwargs.pop("ssl_context")
+        warnings.warn(  # deprecated in 13.0 - 2024-08-20
+            "ssl_context was renamed to ssl",
+            DeprecationWarning,
+        )
+
+    if subprotocols is not None:
+        validate_subprotocols(subprotocols)
+
+    if compression == "deflate":
+        extensions = enable_server_permessage_deflate(extensions)
+    elif compression is not None:
+        raise ValueError(f"unsupported compression: {compression}")
+
+    if create_connection is None:
+        create_connection = ServerConnection
+
+    # Bind socket and listen
+
+    # Private APIs for unix_connect()
+    unix: bool = kwargs.pop("unix", False)
+    path: str | None = kwargs.pop("path", None)
+
+    if sock is None:
+        if unix:
+            if path is None:
+                raise TypeError("missing path argument")
+            kwargs.setdefault("family", socket.AF_UNIX)
+            sock = socket.create_server(path, **kwargs)
+        else:
+            sock = socket.create_server((host, port), **kwargs)
+    else:
+        if path is not None:
+            raise TypeError("path and sock arguments are incompatible")
+
+    # Initialize TLS wrapper
+
+    if ssl is not None:
+        sock = ssl.wrap_socket(
+            sock,
+            server_side=True,
+            # Delay TLS handshake until after we set a timeout on the socket.
+            do_handshake_on_connect=False,
+        )
+
+    # Define request handler
+
+    def conn_handler(sock: socket.socket, addr: Any) -> None:
+        # Calculate timeouts on the TLS and WebSocket handshakes.
+        # The TLS timeout must be set on the socket, then removed
+        # to avoid conflicting with the WebSocket timeout in handshake().
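+        # For example, with open_timeout=10, if the TLS handshake takes 4
+        # seconds, deadline.timeout() then returns roughly 6, so the TLS and
+        # WebSocket handshakes share a single overall time budget.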
+ deadline = Deadline(open_timeout) + + try: + # Disable Nagle algorithm + + if not unix: + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True) + + # Perform TLS handshake + + if ssl is not None: + sock.settimeout(deadline.timeout()) + # mypy cannot figure this out + assert isinstance(sock, ssl_module.SSLSocket) + sock.do_handshake() + sock.settimeout(None) + + # Create a closure to give select_subprotocol access to connection. + protocol_select_subprotocol: ( + Callable[ + [ServerProtocol, Sequence[Subprotocol]], + Subprotocol | None, + ] + | None + ) = None + if select_subprotocol is not None: + + def protocol_select_subprotocol( + protocol: ServerProtocol, + subprotocols: Sequence[Subprotocol], + ) -> Subprotocol | None: + # mypy doesn't know that select_subprotocol is immutable. + assert select_subprotocol is not None + # Ensure this function is only used in the intended context. + assert protocol is connection.protocol + return select_subprotocol(connection, subprotocols) + + # Initialize WebSocket protocol + + protocol = ServerProtocol( + origins=origins, + extensions=extensions, + subprotocols=subprotocols, + select_subprotocol=protocol_select_subprotocol, + max_size=max_size, + logger=logger, + ) + + # Initialize WebSocket connection + + assert create_connection is not None # help mypy + connection = create_connection( + sock, + protocol, + close_timeout=close_timeout, + ) + except Exception: + sock.close() + return + + try: + try: + connection.handshake( + process_request, + process_response, + server_header, + deadline.timeout(), + ) + except TimeoutError: + connection.close_socket() + connection.recv_events_thread.join() + return + except Exception: + connection.logger.error("opening handshake failed", exc_info=True) + connection.close_socket() + connection.recv_events_thread.join() + return + + assert connection.protocol.state is OPEN + try: + handler(connection) + except Exception: + connection.logger.error("connection handler failed", exc_info=True) + connection.close(CloseCode.INTERNAL_ERROR) + else: + connection.close() + + except Exception: # pragma: no cover + # Don't leak sockets on unexpected errors. + sock.close() + + # Initialize server + + return Server(sock, conn_handler, logger) + + +def unix_serve( + handler: Callable[[ServerConnection], None], + path: str | None = None, + **kwargs: Any, +) -> Server: + """ + Create a WebSocket server listening on a Unix socket. + + This function accepts the same keyword arguments as :func:`serve`. + + It's only available on Unix. + + It's useful for deploying a server behind a reverse proxy such as nginx. + + Args: + handler: Connection handler. It receives the WebSocket connection, + which is a :class:`ServerConnection`, in argument. + path: File system path to the Unix socket. + + """ + return serve(handler, unix=True, path=path, **kwargs) + + +def is_credentials(credentials: Any) -> bool: + try: + username, password = credentials + except (TypeError, ValueError): + return False + else: + return isinstance(username, str) and isinstance(password, str) + + +def basic_auth( + realm: str = "", + credentials: tuple[str, str] | Iterable[tuple[str, str]] | None = None, + check_credentials: Callable[[str, str], bool] | None = None, +) -> Callable[[ServerConnection, Request], Response | None]: + """ + Factory for ``process_request`` to enforce HTTP Basic Authentication. 
+
+    :func:`basic_auth` is designed to integrate with :func:`serve` as follows::
+
+        from websockets.sync.server import basic_auth, serve
+
+        with serve(
+            ...,
+            process_request=basic_auth(
+                realm="my dev server",
+                credentials=("hello", "iloveyou"),
+            ),
+        ):
+            ...
+
+    If authentication succeeds, the connection's ``username`` attribute is set.
+    If it fails, the server responds with an HTTP 401 Unauthorized status.
+
+    One of ``credentials`` or ``check_credentials`` must be provided; not both.
+
+    Args:
+        realm: Scope of protection. It should contain only ASCII characters
+            because the encoding of non-ASCII characters is undefined. Refer to
+            section 2.2 of :rfc:`7235` for details.
+        credentials: Hard coded authorized credentials. It can be a
+            ``(username, password)`` pair or a list of such pairs.
+        check_credentials: Function that verifies credentials.
+            It receives ``username`` and ``password`` arguments and returns
+            whether they're valid.
+
+    Raises:
+        TypeError: If ``credentials`` or ``check_credentials`` is wrong.
+
+    """
+    if (credentials is None) == (check_credentials is None):
+        raise TypeError("provide either credentials or check_credentials")
+
+    if credentials is not None:
+        if is_credentials(credentials):
+            credentials_list = [cast(Tuple[str, str], credentials)]
+        elif isinstance(credentials, Iterable):
+            credentials_list = list(cast(Iterable[Tuple[str, str]], credentials))
+            if not all(is_credentials(item) for item in credentials_list):
+                raise TypeError(f"invalid credentials argument: {credentials}")
+        else:
+            raise TypeError(f"invalid credentials argument: {credentials}")
+
+        credentials_dict = dict(credentials_list)
+
+        def check_credentials(username: str, password: str) -> bool:
+            try:
+                expected_password = credentials_dict[username]
+            except KeyError:
+                return False
+            return hmac.compare_digest(expected_password, password)
+
+    assert check_credentials is not None  # help mypy
+
+    def process_request(
+        connection: ServerConnection,
+        request: Request,
+    ) -> Response | None:
+        """
+        Perform HTTP Basic Authentication.
+
+        If it succeeds, set the connection's ``username`` attribute and return
+        :obj:`None`. If it fails, return an HTTP 401 Unauthorized response.
+
+        """
+        try:
+            authorization = request.headers["Authorization"]
+        except KeyError:
+            response = connection.respond(
+                http.HTTPStatus.UNAUTHORIZED,
+                "Missing credentials\n",
+            )
+            response.headers["WWW-Authenticate"] = build_www_authenticate_basic(realm)
+            return response
+
+        try:
+            username, password = parse_authorization_basic(authorization)
+        except InvalidHeader:
+            response = connection.respond(
+                http.HTTPStatus.UNAUTHORIZED,
+                "Unsupported credentials\n",
+            )
+            response.headers["WWW-Authenticate"] = build_www_authenticate_basic(realm)
+            return response
+
+        if not check_credentials(username, password):
+            response = connection.respond(
+                http.HTTPStatus.UNAUTHORIZED,
+                "Invalid credentials\n",
+            )
+            response.headers["WWW-Authenticate"] = build_www_authenticate_basic(realm)
+            return response
+
+        connection.username = username
+        return None
+
+    return process_request
diff --git a/hackaton/lib/python3.12/site-packages/websockets/sync/utils.py b/hackaton/lib/python3.12/site-packages/websockets/sync/utils.py
new file mode 100644
index 0000000..00bce2c
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/websockets/sync/utils.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import time
+
+
+__all__ = ["Deadline"]
+
+
+class Deadline:
+    """
+    Manage timeouts across multiple steps.
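+
+    A sketch of the intended pattern (``connect_step`` and ``handshake_step``
+    are hypothetical stand-ins for consecutive blocking steps)::
+
+        deadline = Deadline(10)
+        connect_step(timeout=deadline.timeout())    # full budget left
+        handshake_step(timeout=deadline.timeout())  # only the remainder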
+
+    Args:
+        timeout: Time available in seconds or :obj:`None` if there is no limit.
+
+    """
+
+    def __init__(self, timeout: float | None) -> None:
+        self.deadline: float | None
+        if timeout is None:
+            self.deadline = None
+        else:
+            self.deadline = time.monotonic() + timeout
+
+    def timeout(self, *, raise_if_elapsed: bool = True) -> float | None:
+        """
+        Calculate a timeout from a deadline.
+
+        Args:
+            raise_if_elapsed: Whether to raise :exc:`TimeoutError`
+                if the deadline lapsed.
+
+        Raises:
+            TimeoutError: If the deadline lapsed.
+
+        Returns:
+            Time left in seconds or :obj:`None` if there is no limit.
+
+        """
+        if self.deadline is None:
+            return None
+        timeout = self.deadline - time.monotonic()
+        if raise_if_elapsed and timeout <= 0:
+            raise TimeoutError("timed out")
+        return timeout
diff --git a/hackaton/lib/python3.12/site-packages/websockets/typing.py b/hackaton/lib/python3.12/site-packages/websockets/typing.py
new file mode 100644
index 0000000..447fe79
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/websockets/typing.py
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+import http
+import logging
+import typing
+from typing import Any, List, NewType, Optional, Tuple, Union
+
+
+__all__ = [
+    "Data",
+    "LoggerLike",
+    "StatusLike",
+    "Origin",
+    "Subprotocol",
+    "ExtensionName",
+    "ExtensionParameter",
+]
+
+
+# Public types used in the signature of public APIs
+
+# Change to str | bytes when dropping Python < 3.10.
+Data = Union[str, bytes]
+"""Types supported in a WebSocket message:
+:class:`str` for a Text_ frame, :class:`bytes` for a Binary_.
+
+.. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6
+.. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6
+
+"""
+
+
+# Change to logging.Logger | ... when dropping Python < 3.10.
+if typing.TYPE_CHECKING:
+    LoggerLike = Union[logging.Logger, logging.LoggerAdapter[Any]]
+    """Types accepted where a :class:`~logging.Logger` is expected."""
+else:  # remove this branch when dropping support for Python < 3.11
+    LoggerLike = Union[logging.Logger, logging.LoggerAdapter]
+    """Types accepted where a :class:`~logging.Logger` is expected."""
+
+
+# Change to http.HTTPStatus | int when dropping Python < 3.10.
+StatusLike = Union[http.HTTPStatus, int]
+"""
+Types accepted where an :class:`~http.HTTPStatus` is expected."""
+
+
+Origin = NewType("Origin", str)
+"""Value of an ``Origin`` header."""
+
+
+Subprotocol = NewType("Subprotocol", str)
+"""Subprotocol in a ``Sec-WebSocket-Protocol`` header."""
+
+
+ExtensionName = NewType("ExtensionName", str)
+"""Name of a WebSocket extension."""
+
+# Change to tuple[str, Optional[str]] when dropping Python < 3.9.
+# Change to tuple[str, str | None] when dropping Python < 3.10.
+ExtensionParameter = Tuple[str, Optional[str]]
+"""Parameter of a WebSocket extension."""
+
+
+# Private types
+
+# Change to tuple[.., list[...]] when dropping Python < 3.9.
+ExtensionHeader = Tuple[ExtensionName, List[ExtensionParameter]] +"""Extension in a ``Sec-WebSocket-Extensions`` header.""" + + +ConnectionOption = NewType("ConnectionOption", str) +"""Connection option in a ``Connection`` header.""" + + +UpgradeProtocol = NewType("UpgradeProtocol", str) +"""Upgrade protocol in an ``Upgrade`` header.""" diff --git a/hackaton/lib/python3.12/site-packages/websockets/uri.py b/hackaton/lib/python3.12/site-packages/websockets/uri.py new file mode 100644 index 0000000..16bb3f1 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/uri.py @@ -0,0 +1,107 @@ +from __future__ import annotations + +import dataclasses +import urllib.parse + +from .exceptions import InvalidURI + + +__all__ = ["parse_uri", "WebSocketURI"] + + +@dataclasses.dataclass +class WebSocketURI: + """ + WebSocket URI. + + Attributes: + secure: :obj:`True` for a ``wss`` URI, :obj:`False` for a ``ws`` URI. + host: Normalized to lower case. + port: Always set even if it's the default. + path: May be empty. + query: May be empty if the URI doesn't include a query component. + username: Available when the URI contains `User Information`_. + password: Available when the URI contains `User Information`_. + + .. _User Information: https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.1 + + """ + + secure: bool + host: str + port: int + path: str + query: str + username: str | None = None + password: str | None = None + + @property + def resource_name(self) -> str: + if self.path: + resource_name = self.path + else: + resource_name = "/" + if self.query: + resource_name += "?" + self.query + return resource_name + + @property + def user_info(self) -> tuple[str, str] | None: + if self.username is None: + return None + assert self.password is not None + return (self.username, self.password) + + +# All characters from the gen-delims and sub-delims sets in RFC 3987. +DELIMS = ":/?#[]@!$&'()*+,;=" + + +def parse_uri(uri: str) -> WebSocketURI: + """ + Parse and validate a WebSocket URI. + + Args: + uri: WebSocket URI. + + Returns: + Parsed WebSocket URI. + + Raises: + InvalidURI: If ``uri`` isn't a valid WebSocket URI. + + """ + parsed = urllib.parse.urlparse(uri) + if parsed.scheme not in ["ws", "wss"]: + raise InvalidURI(uri, "scheme isn't ws or wss") + if parsed.hostname is None: + raise InvalidURI(uri, "hostname isn't provided") + if parsed.fragment != "": + raise InvalidURI(uri, "fragment identifier is meaningless") + + secure = parsed.scheme == "wss" + host = parsed.hostname + port = parsed.port or (443 if secure else 80) + path = parsed.path + query = parsed.query + username = parsed.username + password = parsed.password + # urllib.parse.urlparse accepts URLs with a username but without a + # password. This doesn't make sense for HTTP Basic Auth credentials. + if username is not None and password is None: + raise InvalidURI(uri, "username provided without password") + + try: + uri.encode("ascii") + except UnicodeEncodeError: + # Input contains non-ASCII characters. + # It must be an IRI. Convert it to a URI. 
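+        # For example, a path of "/café" is percent-encoded as "/caf%C3%A9",
+        # and a non-ASCII hostname is converted to its IDNA ("xn--...") form.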
+ host = host.encode("idna").decode() + path = urllib.parse.quote(path, safe=DELIMS) + query = urllib.parse.quote(query, safe=DELIMS) + if username is not None: + assert password is not None + username = urllib.parse.quote(username, safe=DELIMS) + password = urllib.parse.quote(password, safe=DELIMS) + + return WebSocketURI(secure, host, port, path, query, username, password) diff --git a/hackaton/lib/python3.12/site-packages/websockets/utils.py b/hackaton/lib/python3.12/site-packages/websockets/utils.py new file mode 100644 index 0000000..62d2dc1 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/utils.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +import base64 +import hashlib +import secrets +import sys + + +__all__ = ["accept_key", "apply_mask"] + + +GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + +def generate_key() -> str: + """ + Generate a random key for the Sec-WebSocket-Key header. + + """ + key = secrets.token_bytes(16) + return base64.b64encode(key).decode() + + +def accept_key(key: str) -> str: + """ + Compute the value of the Sec-WebSocket-Accept header. + + Args: + key: Value of the Sec-WebSocket-Key header. + + """ + sha1 = hashlib.sha1((key + GUID).encode()).digest() + return base64.b64encode(sha1).decode() + + +def apply_mask(data: bytes, mask: bytes) -> bytes: + """ + Apply masking to the data of a WebSocket message. + + Args: + data: Data to mask. + mask: 4-bytes mask. + + """ + if len(mask) != 4: + raise ValueError("mask must contain 4 bytes") + + data_int = int.from_bytes(data, sys.byteorder) + mask_repeated = mask * (len(data) // 4) + mask[: len(data) % 4] + mask_int = int.from_bytes(mask_repeated, sys.byteorder) + return (data_int ^ mask_int).to_bytes(len(data), sys.byteorder) diff --git a/hackaton/lib/python3.12/site-packages/websockets/version.py b/hackaton/lib/python3.12/site-packages/websockets/version.py new file mode 100644 index 0000000..00b0a98 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/websockets/version.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import importlib.metadata + + +__all__ = ["tag", "version", "commit"] + + +# ========= =========== =================== +# release development +# ========= =========== =================== +# tag X.Y X.Y (upcoming) +# version X.Y X.Y.dev1+g5678cde +# commit X.Y 5678cde +# ========= =========== =================== + + +# When tagging a release, set `released = True`. +# After tagging a release, set `released = False` and increment `tag`. + +released = True + +tag = version = commit = "13.1" + + +if not released: # pragma: no cover + import pathlib + import re + import subprocess + + def get_version(tag: str) -> str: + # Since setup.py executes the contents of src/websockets/version.py, + # __file__ can point to either of these two files. + file_path = pathlib.Path(__file__) + root_dir = file_path.parents[0 if file_path.name == "setup.py" else 2] + + # Read version from package metadata if it is installed. + try: + version = importlib.metadata.version("websockets") + except ImportError: + pass + else: + # Check that this file belongs to the installed package. + files = importlib.metadata.files("websockets") + if files: + version_files = [f for f in files if f.name == file_path.name] + if version_files: + version_file = version_files[0] + if version_file.locate() == file_path: + return version + + # Read version from git if available. 
+ try: + description = subprocess.run( + ["git", "describe", "--dirty", "--tags", "--long"], + capture_output=True, + cwd=root_dir, + timeout=1, + check=True, + text=True, + ).stdout.strip() + # subprocess.run raises FileNotFoundError if git isn't on $PATH. + except ( + FileNotFoundError, + subprocess.CalledProcessError, + subprocess.TimeoutExpired, + ): + pass + else: + description_re = r"[0-9.]+-([0-9]+)-(g[0-9a-f]{7,}(?:-dirty)?)" + match = re.fullmatch(description_re, description) + if match is None: + raise ValueError(f"Unexpected git description: {description}") + distance, remainder = match.groups() + remainder = remainder.replace("-", ".") # required by PEP 440 + return f"{tag}.dev{distance}+{remainder}" + + # Avoid crashing if the development version cannot be determined. + return f"{tag}.dev0+gunknown" + + version = get_version(tag) + + def get_commit(tag: str, version: str) -> str: + # Extract commit from version, falling back to tag if not available. + version_re = r"[0-9.]+\.dev[0-9]+\+g([0-9a-f]{7,}|unknown)(?:\.dirty)?" + match = re.fullmatch(version_re, version) + if match is None: + raise ValueError(f"Unexpected version: {version}") + (commit,) = match.groups() + return tag if commit == "unknown" else commit + + commit = get_commit(tag, version) diff --git a/hackaton/lib/python3.12/site-packages/yaml/__init__.py b/hackaton/lib/python3.12/site-packages/yaml/__init__.py new file mode 100644 index 0000000..2ec4f20 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/__init__.py @@ -0,0 +1,390 @@ + +from .error import * + +from .tokens import * +from .events import * +from .nodes import * + +from .loader import * +from .dumper import * + +__version__ = '6.0.2' +try: + from .cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +import io + +#------------------------------------------------------------------------------ +# XXX "Warnings control" is now deprecated. Leaving in the API function to not +# break code that uses it. +#------------------------------------------------------------------------------ +def warnings(settings=None): + if settings is None: + return {} + +#------------------------------------------------------------------------------ +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
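+
+    For example, two documents separated by "---" yield two objects:
+        list(load_all("a: 1\n---\nb: 2", SafeLoader))  # [{'a': 1}, {'b': 2}]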
+ """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def full_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + + Resolve all tags except those known to be + unsafe on untrusted input. + """ + return load(stream, FullLoader) + +def full_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + + Resolve all tags except those known to be + unsafe on untrusted input. + """ + return load_all(stream, FullLoader) + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + + Resolve only basic YAML tags. This is known + to be safe for untrusted input. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + + Resolve only basic YAML tags. This is known + to be safe for untrusted input. + """ + return load_all(stream, SafeLoader) + +def unsafe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + + Resolve all tags, even those known to be + unsafe on untrusted input. + """ + return load(stream, UnsafeLoader) + +def unsafe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + + Resolve all tags, even those known to be + unsafe on untrusted input. + """ + return load_all(stream, UnsafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + stream = io.StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + stream = io.StringIO() + else: + stream = io.BytesIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + stream = io.StringIO() + else: + stream = io.BytesIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=None, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + if Loader is None: + loader.Loader.add_implicit_resolver(tag, regexp, first) + loader.FullLoader.add_implicit_resolver(tag, regexp, first) + loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first) + else: + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + if Loader is None: + loader.Loader.add_path_resolver(tag, path, kind) + loader.FullLoader.add_path_resolver(tag, path, kind) + loader.UnsafeLoader.add_path_resolver(tag, path, kind) + else: + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=None): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. 
+ """ + if Loader is None: + loader.Loader.add_constructor(tag, constructor) + loader.FullLoader.add_constructor(tag, constructor) + loader.UnsafeLoader.add_constructor(tag, constructor) + else: + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=None): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + if Loader is None: + loader.Loader.add_multi_constructor(tag_prefix, multi_constructor) + loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor) + loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor) + else: + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + if isinstance(cls.yaml_loader, list): + for loader in cls.yaml_loader: + loader.add_constructor(cls.yaml_tag, cls.from_yaml) + else: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(metaclass=YAMLObjectMetaclass): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = [Loader, FullLoader, UnsafeLoader] + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + @classmethod + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + + @classmethod + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. 
+ """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/__init__.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..4dafd3f Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/__init__.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/composer.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/composer.cpython-312.pyc new file mode 100644 index 0000000..9c8ab28 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/composer.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/constructor.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/constructor.cpython-312.pyc new file mode 100644 index 0000000..7be72c4 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/constructor.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/cyaml.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/cyaml.cpython-312.pyc new file mode 100644 index 0000000..3ee3c11 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/cyaml.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/dumper.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/dumper.cpython-312.pyc new file mode 100644 index 0000000..66f469e Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/dumper.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/emitter.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/emitter.cpython-312.pyc new file mode 100644 index 0000000..de4c180 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/emitter.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/error.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/error.cpython-312.pyc new file mode 100644 index 0000000..093a20e Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/error.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/events.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/events.cpython-312.pyc new file mode 100644 index 0000000..70ff2e3 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/events.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/loader.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/loader.cpython-312.pyc new file mode 100644 index 0000000..85d8623 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/loader.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/nodes.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/nodes.cpython-312.pyc new file mode 100644 index 0000000..a86fff1 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/nodes.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/parser.cpython-312.pyc 
b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/parser.cpython-312.pyc new file mode 100644 index 0000000..9ddc5e1 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/parser.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/reader.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/reader.cpython-312.pyc new file mode 100644 index 0000000..e9d4fb1 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/reader.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/representer.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/representer.cpython-312.pyc new file mode 100644 index 0000000..5478c3b Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/representer.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/resolver.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/resolver.cpython-312.pyc new file mode 100644 index 0000000..db690ef Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/resolver.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/scanner.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/scanner.cpython-312.pyc new file mode 100644 index 0000000..17a6ccc Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/scanner.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/serializer.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/serializer.cpython-312.pyc new file mode 100644 index 0000000..622ce04 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/serializer.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/__pycache__/tokens.cpython-312.pyc b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/tokens.cpython-312.pyc new file mode 100644 index 0000000..4adc6fc Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/__pycache__/tokens.cpython-312.pyc differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/_yaml.cpython-312-darwin.so b/hackaton/lib/python3.12/site-packages/yaml/_yaml.cpython-312-darwin.so new file mode 100755 index 0000000..7f8d5b4 Binary files /dev/null and b/hackaton/lib/python3.12/site-packages/yaml/_yaml.cpython-312-darwin.so differ diff --git a/hackaton/lib/python3.12/site-packages/yaml/composer.py b/hackaton/lib/python3.12/site-packages/yaml/composer.py new file mode 100644 index 0000000..6d15cb4 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from .error import MarkedYAMLError +from .events import * +from .nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer: + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. 
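+        # A YAML stream is STREAM-START, zero or more documents, STREAM-END;
+        # checking for STREAM-END here is how an empty stream is detected.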
+ document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. + self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor, event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurrence" + % anchor, self.anchors[anchor].start_mark, + "second occurrence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == '!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == '!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == '!': + tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/hackaton/lib/python3.12/site-packages/yaml/constructor.py b/hackaton/lib/python3.12/site-packages/yaml/constructor.py new file mode 100644 index 0000000..619acd3 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/constructor.py 
@@ -0,0 +1,748 @@ + +__all__ = [ + 'BaseConstructor', + 'SafeConstructor', + 'FullConstructor', + 'UnsafeConstructor', + 'Constructor', + 'ConstructorError' +] + +from .error import * +from .nodes import * + +import collections.abc, datetime, base64, binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor: + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def check_state_key(self, key): + """Block special attributes/methods from being set in a newly created + object, to prevent user-controlled methods from being called during + deserialization""" + if self.get_state_keys_blacklist_regexp().match(key): + raise ConstructorError(None, None, + "blacklisted key '%s' in instance state found" % (key,), None) + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. + node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if tag_prefix is not None and node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = next(generator) + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + 
return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + if not isinstance(key, collections.abc.Hashable): + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unhashable key", key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + @classmethod + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + + @classmethod + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == 'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return super().construct_scalar(node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == 'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == 'tag:yaml.org,2002:value': + key_node.tag = 'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return super().construct_mapping(node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + 'yes': True, + 'no': False, + 'true': True, + 'false': False, + 'on': True, + 'off': False, + } + + def 
construct_yaml_bool(self, node):
+        value = self.construct_scalar(node)
+        return self.bool_values[value.lower()]
+
+    def construct_yaml_int(self, node):
+        value = self.construct_scalar(node)
+        value = value.replace('_', '')
+        sign = +1
+        if value[0] == '-':
+            sign = -1
+        if value[0] in '+-':
+            value = value[1:]
+        if value == '0':
+            return 0
+        elif value.startswith('0b'):
+            return sign*int(value[2:], 2)
+        elif value.startswith('0x'):
+            return sign*int(value[2:], 16)
+        elif value[0] == '0':
+            return sign*int(value, 8)
+        elif ':' in value:
+            digits = [int(part) for part in value.split(':')]
+            digits.reverse()
+            base = 1
+            value = 0
+            for digit in digits:
+                value += digit*base
+                base *= 60
+            return sign*value
+        else:
+            return sign*int(value)
+
+    inf_value = 1e300
+    while inf_value != inf_value*inf_value:
+        inf_value *= inf_value
+    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).
+
+    def construct_yaml_float(self, node):
+        value = self.construct_scalar(node)
+        value = value.replace('_', '').lower()
+        sign = +1
+        if value[0] == '-':
+            sign = -1
+        if value[0] in '+-':
+            value = value[1:]
+        if value == '.inf':
+            return sign*self.inf_value
+        elif value == '.nan':
+            return self.nan_value
+        elif ':' in value:
+            digits = [float(part) for part in value.split(':')]
+            digits.reverse()
+            base = 1
+            value = 0.0
+            for digit in digits:
+                value += digit*base
+                base *= 60
+            return sign*value
+        else:
+            return sign*float(value)
+
+    def construct_yaml_binary(self, node):
+        try:
+            value = self.construct_scalar(node).encode('ascii')
+        except UnicodeEncodeError as exc:
+            raise ConstructorError(None, None,
+                    "failed to convert base64 data into ascii: %s" % exc,
+                    node.start_mark)
+        try:
+            if hasattr(base64, 'decodebytes'):
+                return base64.decodebytes(value)
+            else:
+                return base64.decodestring(value)
+        except binascii.Error as exc:
+            raise ConstructorError(None, None,
+                    "failed to decode base64 data: %s" % exc, node.start_mark)
+
+    timestamp_regexp = re.compile(
+            r'''^(?P<year>[0-9][0-9][0-9][0-9])
+                -(?P<month>[0-9][0-9]?)
+                -(?P<day>[0-9][0-9]?)
+                (?:(?:[Tt]|[ \t]+)
+                (?P<hour>[0-9][0-9]?)
+                :(?P<minute>[0-9][0-9])
+                :(?P<second>[0-9][0-9])
+                (?:\.(?P<fraction>[0-9]*))?
+                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+    def construct_yaml_timestamp(self, node):
+        value = self.construct_scalar(node)
+        match = self.timestamp_regexp.match(node.value)
+        values = match.groupdict()
+        year = int(values['year'])
+        month = int(values['month'])
+        day = int(values['day'])
+        if not values['hour']:
+            return datetime.date(year, month, day)
+        hour = int(values['hour'])
+        minute = int(values['minute'])
+        second = int(values['second'])
+        fraction = 0
+        tzinfo = None
+        if values['fraction']:
+            fraction = values['fraction'][:6]
+            while len(fraction) < 6:
+                fraction += '0'
+            fraction = int(fraction)
+        if values['tz_sign']:
+            tz_hour = int(values['tz_hour'])
+            tz_minute = int(values['tz_minute'] or 0)
+            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+            if values['tz_sign'] == '-':
+                delta = -delta
+            tzinfo = datetime.timezone(delta)
+        elif values['tz']:
+            tzinfo = datetime.timezone.utc
+        return datetime.datetime(year, month, day, hour, minute, second, fraction,
+                                 tzinfo=tzinfo)
+
+    def construct_yaml_omap(self, node):
+        # Note: we do not check for duplicate keys, because it's too
+        # CPU-expensive.
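+        # Yielding the (still empty) result first lets the constructor
+        # machinery resolve aliases to this object before its items are
+        # built; this two-step dance is how recursive nodes are supported.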
+ omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. + pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + return self.construct_scalar(node) + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag, + node.start_mark) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + 
'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class FullConstructor(SafeConstructor): + # 'extend' is blacklisted because it is used by + # construct_python_object_apply to add `listitems` to a newly generated + # python instance + def get_state_keys_blacklist(self): + return ['^extend$', '^__.*__$'] + + def get_state_keys_blacklist_regexp(self): + if not hasattr(self, 'state_keys_blacklist_regexp'): + self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')') + return self.state_keys_blacklist_regexp + + def check_state_key(self, key): + # Reject blacklisted state keys (e.g. 'extend' and dunder names) so + # that user-controlled instance state cannot override special + # attributes or methods during deserialization. + if self.get_state_keys_blacklist_regexp().match(key): + raise ConstructorError(None, None, + "blacklisted key '%s' in instance state found" % (key,), None) + + def construct_python_str(self, node): + return self.construct_scalar(node) + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_bytes(self, node): + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError(None, None, + "failed to convert base64 data into ascii: %s" % exc, + node.start_mark) + try: + if hasattr(base64, 'decodebytes'): + return base64.decodebytes(value) + else: + return base64.decodestring(value) + except binascii.Error as exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + def construct_python_long(self, node): + return self.construct_yaml_int(node) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark, unsafe=False): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + if unsafe: + try: + __import__(name) + except ImportError as exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name, exc), mark) + if name not in sys.modules: + raise ConstructorError("while constructing a Python module", mark, + "module %r is not imported" % name, mark) + return sys.modules[name] + + def find_python_name(self, name, mark, unsafe=False): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if '.' 
in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = 'builtins' + object_name = name + if unsafe: + try: + __import__(module_name) + except ImportError as exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name, exc), mark) + if module_name not in sys.modules: + raise ConstructorError("while constructing a Python object", mark, + "module %r is not imported" % module_name, mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" + % (object_name, module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value, node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value, node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False, unsafe=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if not (unsafe or isinstance(cls, type)): + raise ConstructorError("while constructing a Python instance", node.start_mark, + "expected a class, but found %r" % type(cls), + node.start_mark) + if newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state, unsafe=False): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + if not unsafe and state: + for key in state.keys(): + self.check_state_key(key) + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + if not unsafe: + self.check_state_key(key) + setattr(instance, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
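+ # For example, the short form
+ # !!python/object/apply:collections.OrderedDict [[[a, 1], [b, 2]]]
+ # is roughly equivalent to calling collections.OrderedDict([('a', 1), ('b', 2)]);
+ # note that this tag is only registered on UnsafeConstructor below.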
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/none', + FullConstructor.construct_yaml_null) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/bool', + FullConstructor.construct_yaml_bool) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/str', + FullConstructor.construct_python_str) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/unicode', + FullConstructor.construct_python_unicode) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/bytes', + FullConstructor.construct_python_bytes) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/int', + FullConstructor.construct_yaml_int) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/long', + FullConstructor.construct_python_long) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/float', + FullConstructor.construct_yaml_float) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/complex', + FullConstructor.construct_python_complex) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/list', + FullConstructor.construct_yaml_seq) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/tuple', + FullConstructor.construct_python_tuple) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/dict', + FullConstructor.construct_yaml_map) + +FullConstructor.add_multi_constructor( + 'tag:yaml.org,2002:python/name:', + FullConstructor.construct_python_name) + +class UnsafeConstructor(FullConstructor): + + def find_python_module(self, name, mark): + return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True) + + def find_python_name(self, name, mark): + return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True) + + def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False): + return super(UnsafeConstructor, self).make_python_instance( + suffix, node, args, kwds, newobj, unsafe=True) + + def set_python_instance_state(self, instance, state): + return super(UnsafeConstructor, self).set_python_instance_state( + instance, state, unsafe=True) + +UnsafeConstructor.add_multi_constructor( + 'tag:yaml.org,2002:python/module:', + UnsafeConstructor.construct_python_module) + +UnsafeConstructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object:', + UnsafeConstructor.construct_python_object) + +UnsafeConstructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object/new:', + UnsafeConstructor.construct_python_object_new) + +UnsafeConstructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object/apply:', + UnsafeConstructor.construct_python_object_apply) + +# Constructor is same as UnsafeConstructor. Need to leave this in place in case +# people have extended it directly. 
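+# (This is the constructor behind yaml.load(..., Loader=yaml.Loader); new code
+# should normally go through yaml.safe_load or FullLoader instead.)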
+class Constructor(UnsafeConstructor): + pass diff --git a/hackaton/lib/python3.12/site-packages/yaml/cyaml.py b/hackaton/lib/python3.12/site-packages/yaml/cyaml.py new file mode 100644 index 0000000..0c21345 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/cyaml.py @@ -0,0 +1,101 @@ + +__all__ = [ + 'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper' +] + +from yaml._yaml import CParser, CEmitter + +from .constructor import * + +from .serializer import * +from .representer import * + +from .resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CFullLoader(CParser, FullConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + FullConstructor.__init__(self) + Resolver.__init__(self) + +class CUnsafeLoader(CParser, UnsafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + UnsafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, 
default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + diff --git a/hackaton/lib/python3.12/site-packages/yaml/dumper.py b/hackaton/lib/python3.12/site-packages/yaml/dumper.py new file mode 100644 index 0000000..6aadba5 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from .emitter import * +from .serializer import * +from .representer import * +from .resolver import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + diff --git a/hackaton/lib/python3.12/site-packages/yaml/emitter.py b/hackaton/lib/python3.12/site-packages/yaml/emitter.py new file mode 100644 index 0000000..a664d01 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/emitter.py @@ -0,0 +1,1137 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from .error import YAMLError +from .events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis: + def __init__(self, scalar, empty, 
multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter: + + DEFAULT_TAG_PREFIXES = { + '!' : '!', + 'tag:yaml.org,2002:' : '!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overridden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. + self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = '\n' + if line_break in ['\r', '\n', '\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. + + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. 
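+ # For a stream holding a single scalar document, the states run roughly:
+ # expect_stream_start -> expect_first_document_start -> expect_document_root
+ # -> expect_scalar -> expect_document_end -> expect_document_start
+ # -> expect_nothing (after StreamEndEvent).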
+ + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not hasattr(self.stream, 'encoding'): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. + + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator('...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = sorted(self.event.tags.keys()) + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator('---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator('...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator('...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor('&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor('*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. + + def expect_flow_sequence(self): + self.write_indicator('[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(',', False) + self.write_indent() + self.write_indicator(']', False) + self.state = self.states.pop() + else: + self.write_indicator(',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
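+ # These produce flow-style mappings, e.g. {one: 1, two: 2}; non-simple
+ # keys are introduced with the '?' indicator.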
+ + def expect_flow_mapping(self): + self.write_indicator('{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator('}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(',', False) + self.write_indent() + self.write_indicator('}', False) + self.state = self.states.pop() + else: + self.write_indicator(',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. + + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator('-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. 
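+ # These produce block-style mappings, one "key: value" entry per line;
+ # keys that fail check_simple_key() are written after a '?' indicator.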
+ + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. + + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == '') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. + + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = '!' 
+ self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return '%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != '!' or handle[-1] != '!': + raise EmitterError("tag handle must start and end with '!': %r" % handle) + for ch in handle[1:-1]: + if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch, handle)) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == '!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + # iterating over bytes yields ints, so format the byte + # value directly (ord() here would raise TypeError) + chunks.append('%%%02X' % ch) + if start < end: + chunks.append(prefix[start:end]) + return ''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == '!': + return tag + handle = None + suffix = tag + prefixes = sorted(self.tag_prefixes.keys()) + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == '!' 
or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-;/?:@&=+$,_.~*\'()[]' \ + or (ch == '!' and handle != '!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append('%%%02X' % ch) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = ''.join(chunks) + if handle: + return '%s%s' % (handle, suffix_text) + else: + return '!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch, anchor)) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith('---') or scalar.startswith('...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in '\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. + if ch in '#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in '?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == '-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in ',?[]{}': + flow_indicators = True + if ch == ':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == '#' and preceded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in '\n\x85\u2028\u2029': + line_breaks = True + if not (ch == '\n' or '\x20' <= ch <= '\x7E'): + if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF' + or '\uE000' <= ch <= '\uFFFD' + or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. 
+ if ch == ' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in '\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. + if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
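+ # (Only UTF-16 encodings get an explicit BOM here; for example, dumping
+ # with encoding='utf-16' makes the stream start with an encoded '\uFEFF'.)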
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write('\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = ' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = ' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = '%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = '%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. + + def write_single_quoted(self, text, split=True): + self.write_indicator('\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != ' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + if text[start] == '\n': + self.write_line_break() + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == '\'': + data = '\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == ' ') + breaks = (ch in '\n\x85\u2028\u2029') + end += 1 + self.write_indicator('\'', False) + + ESCAPE_REPLACEMENTS = { + '\0': '0', + '\x07': 'a', + '\x08': 'b', + '\x09': 't', + '\x0A': 'n', + '\x0B': 'v', + '\x0C': 'f', + '\x0D': 'r', + '\x1B': 'e', + '\"': '\"', + '\\': '\\', + '\x85': 'N', + '\xA0': '_', + '\u2028': 'L', + '\u2029': 'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator('"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \ + or not ('\x20' <= ch <= '\x7E' + or (self.allow_unicode + and ('\xA0' <= ch <= '\uD7FF' + 
or '\uE000' <= ch <= '\uFFFD'))): + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = '\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= '\xFF': + data = '\\x%02X' % ord(ch) + elif ch <= '\uFFFF': + data = '\\u%04X' % ord(ch) + else: + data = '\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == ' ': + data = '\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator('"', False) + + def determine_block_hints(self, text): + hints = '' + if text: + if text[0] in ' \n\x85\u2028\u2029': + hints += str(self.best_indent) + if text[-1] not in '\n\x85\u2028\u2029': + hints += '-' + elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029': + hints += '+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator('>'+hints, True) + if hints[-1:] == '+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != ' ' \ + and text[start] == '\n': + self.write_line_break() + leading_space = (ch == ' ') + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != ' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in '\n\x85\u2028\u2029') + spaces = (ch == ' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator('|'+hints, True) + if hints[-1:] == '+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in '\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in '\n\x85\u2028\u2029') + end += 1 + 
+ def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = ' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch != ' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in '\n\x85\u2028\u2029': + if text[start] == '\n': + self.write_line_break() + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == ' ') + breaks = (ch in '\n\x85\u2028\u2029') + end += 1 diff --git a/hackaton/lib/python3.12/site-packages/yaml/error.py b/hackaton/lib/python3.12/site-packages/yaml/error.py new file mode 100644 index 0000000..b796b4d --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark: + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end] + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/hackaton/lib/python3.12/site-packages/yaml/events.py b/hackaton/lib/python3.12/site-packages/yaml/events.py new file mode 100644 index 0000000..f79ad38 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. + +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. 
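+# For example, parsing the document "a: 1" yields the event sequence:
+# StreamStartEvent, DocumentStartEvent, MappingStartEvent, ScalarEvent('a'),
+# ScalarEvent('1'), MappingEndEvent, DocumentEndEvent, StreamEndEvent.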
+ +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass + +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/hackaton/lib/python3.12/site-packages/yaml/loader.py b/hackaton/lib/python3.12/site-packages/yaml/loader.py new file mode 100644 index 0000000..e90c112 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/loader.py @@ -0,0 +1,63 @@ + +__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader'] + +from .reader import * +from .scanner import * +from .parser import * +from .composer import * +from .constructor import * +from .resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + FullConstructor.__init__(self) + Resolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + +# UnsafeLoader is the same as Loader (which is and was always unsafe on +# untrusted input). Use of either Loader or UnsafeLoader should be rare, since +# FullLoad should be able to load almost all YAML safely. Loader is left intact +# to ensure backwards compatibility. 
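+# (Selected with yaml.unsafe_load(data) or
+# yaml.load(data, Loader=yaml.UnsafeLoader).)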
+class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) diff --git a/hackaton/lib/python3.12/site-packages/yaml/nodes.py b/hackaton/lib/python3.12/site-packages/yaml/nodes.py new file mode 100644 index 0000000..c4f070c --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = '<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/hackaton/lib/python3.12/site-packages/yaml/parser.py b/hackaton/lib/python3.12/site-packages/yaml/parser.py new file mode 100644 index 0000000..13a5995 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? +# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
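# [Editor's sketch, not part of the diff] The grammar in the comment above is
# what yaml.parse() walks; it yields the event classes from events.py, so a
# small document makes the LL(1) structure directly visible:
import yaml

for event in yaml.parse("- a\n- b: c\n"):
    print(type(event).__name__)
# StreamStartEvent, DocumentStartEvent, SequenceStartEvent, ScalarEvent,
# MappingStartEvent, ScalarEvent, ScalarEvent, MappingEndEvent,
# SequenceEndEvent, DocumentEndEvent, StreamEndEvent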
+# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from .error import MarkedYAMLError +from .tokens import * +from .events import * +from .scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser: + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + '!': '!', + '!!': 'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. + self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. 
+ self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '<document start>', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. + self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == 'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == 'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle, + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle, + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == '!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == '!') + if indentless_sequence and self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == '!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not 
None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. + event = ScalarEvent(anchor, tag, (implicit, False), '', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected <block end>, but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected <block end>, but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generates an inline mapping (set syntax).
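# [Editor's sketch, not part of the diff] The interpretation difference noted
# above, observed from the public API: a KEY/VALUE pair inside a flow
# sequence becomes a single-pair inline mapping entry.
import yaml

print(yaml.safe_load("[one: two, three]"))  # [{'one': 'two'}, 'three']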
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
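# [Editor's sketch, not part of the diff] Per the flow_mapping productions
# above, both halves of an entry are optional; the parser substitutes empty
# scalars (which load as None) for the missing parts.
import yaml

print(yaml.safe_load("{a: 1, b, c: }"))  # {'a': 1, 'b': None, 'c': None}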
+ + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), '', mark, mark) + diff --git a/hackaton/lib/python3.12/site-packages/yaml/reader.py b/hackaton/lib/python3.12/site-packages/yaml/reader.py new file mode 100644 index 0000000..774b021 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/reader.py @@ -0,0 +1,185 @@ +# This module contains abstractions for the input stream. You don't have to +# look further, there is no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position `length` characters forward. +# reader.index - the number of the current character. +# reader.line, reader.column - the line and the column of the current character.
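# [Editor's sketch, not part of the diff] The Reader interface described in
# the header comment above, exercised directly; printed output is indicative.
from yaml.reader import Reader

r = Reader("key: value\n")
print(r.peek())      # 'k'  -- look ahead without consuming
print(r.prefix(3))   # 'key'
r.forward(5)         # consume 'key: '
print(r.peek())      # 'v'
print(r.get_mark())  # e.g. ' in "<unicode string>", line 1, column 6' plus snippet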
+ +__all__ = ['Reader', 'ReaderError'] + +from .error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, bytes): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to a unicode string, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `bytes` object, + # - a `str` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. + + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = '' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, str): + self.name = "<unicode string>" + self.check_printable(stream) + self.buffer = stream+'\0' + elif isinstance(stream, bytes): + self.name = "<byte string>" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "<file>") + self.eof = False + self.raw_buffer = None + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in '\n\x85\u2028\u2029' \ + or (ch == '\r' and self.buffer[self.pointer] != '\n'): + self.line += 1 + self.column = 0 + elif ch != '\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2): + self.update_raw() + if isinstance(self.raw_buffer, bytes): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), +
'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError as exc: + character = self.raw_buffer[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += '\0' + self.raw_buffer = None + break + + def update_raw(self, size=4096): + data = self.stream.read(size) + if self.raw_buffer is None: + self.raw_buffer = data + else: + self.raw_buffer += data + self.stream_pointer += len(data) + if not data: + self.eof = True diff --git a/hackaton/lib/python3.12/site-packages/yaml/representer.py b/hackaton/lib/python3.12/site-packages/yaml/representer.py new file mode 100644 index 0000000..808ca06 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/representer.py @@ -0,0 +1,389 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from .error import * +from .nodes import * + +import datetime, copyreg, types, base64, collections + +class RepresenterError(YAMLError): + pass + +class BaseRepresenter: + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=False, sort_keys=True): + self.default_style = default_style + self.sort_keys = sort_keys + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, str(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + @classmethod + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + cls.yaml_representers[data_type] = representer + + @classmethod + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in 
cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = list(mapping.items()) + if self.sort_keys: + try: + mapping = sorted(mapping) + except TypeError: + pass + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, bytes, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar('tag:yaml.org,2002:null', 'null') + + def represent_str(self, data): + return self.represent_scalar('tag:yaml.org,2002:str', data) + + def represent_binary(self, data): + if hasattr(base64, 'encodebytes'): + data = base64.encodebytes(data).decode('ascii') + else: + data = base64.encodestring(data).decode('ascii') + return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|') + + def represent_bool(self, data): + if data: + value = 'true' + else: + value = 'false' + return self.represent_scalar('tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar('tag:yaml.org,2002:int', str(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = '.nan' + elif data == self.inf_value: + value = '.inf' + elif data == -self.inf_value: + value = '-.inf' + else: + value = repr(data).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if '.' 
not in value and 'e' in value: + value = value.replace('e', '.0e', 1) + return self.represent_scalar('tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence('tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping('tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping('tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = data.isoformat() + return self.represent_scalar('tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = data.isoformat(' ') + return self.represent_scalar('tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object", data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(bytes, + SafeRepresenter.represent_binary) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_complex(self, data): + if data.imag == 0.0: + data = '%r' % data.real + elif data.real == 0.0: + data = '%rj' % data.imag + elif data.imag > 0: + data = '%r+%rj' % (data.real, data.imag) + else: + data = '%r%rj' % (data.real, data.imag) + return self.represent_scalar('tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence('tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = '%s.%s' % (data.__module__, data.__name__) + return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '') + + def represent_module(self, data): + return self.represent_scalar( + 'tag:yaml.org,2002:python/module:'+data.__name__, '') + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copyreg.dispatch_table: + reduce = copyreg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent an object", data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = 'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = 'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = '%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + 'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) + + def represent_ordered_dict(self, data): + # Provide uniform representation across different Python versions. 
+ data_type = type(data) + tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \ + % (data_type.__module__, data_type.__name__) + items = [[key, value] for key, value in data.items()] + return self.represent_sequence(tag, [items]) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_multi_representer(type, + Representer.represent_name) + +Representer.add_representer(collections.OrderedDict, + Representer.represent_ordered_dict) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/hackaton/lib/python3.12/site-packages/yaml/resolver.py b/hackaton/lib/python3.12/site-packages/yaml/resolver.py new file mode 100644 index 0000000..3522bda --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from .error import * +from .nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver: + + DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + @classmethod + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + + @classmethod + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
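# [Editor's sketch, not part of the diff] A plausible use of the experimental
# add_path_resolver API documented above; the '!upper' tag, its constructor,
# and MyLoader are invented for illustration. Note that implicit resolvers
# run first, so a path resolver only affects values they leave untagged
# (plain strings, for example).
import yaml

class MyLoader(yaml.SafeLoader):
    pass

# Resolve the value under the top-level 'name' key to the custom tag.
MyLoader.add_path_resolver('!upper', [(dict, 'name')])
MyLoader.add_constructor('!upper',
    lambda loader, node: loader.construct_scalar(node).upper())

print(yaml.load("name: alice\ncity: oslo", Loader=MyLoader))
# {'name': 'ALICE', 'city': 'oslo'}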
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, str) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (str, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, str): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, str): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == '': + resolvers = self.yaml_implicit_resolvers.get('', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + wildcard_resolvers = self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers + wildcard_resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return 
self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:bool', + re.compile(r'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list('yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:float', + re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9][0-9_]*(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list('-+0123456789.')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:int', + re.compile(r'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list('-+0123456789')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:merge', + re.compile(r'^(?:<<)$'), + ['<']) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:null', + re.compile(r'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + ['~', 'n', 'N', '']) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:timestamp', + re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list('0123456789')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:value', + re.compile(r'^(?:=)$'), + ['=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:yaml', + re.compile(r'^(?:!|&|\*)$'), + list('!&*')) + diff --git a/hackaton/lib/python3.12/site-packages/yaml/scanner.py b/hackaton/lib/python3.12/site-packages/yaml/scanner.py new file mode 100644 index 0000000..de925b0 --- /dev/null +++ b/hackaton/lib/python3.12/site-packages/yaml/scanner.py @@ -0,0 +1,1435 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from .error import MarkedYAMLError +from .tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey: + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner: + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. 
+ + # Have we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. + self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more than one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete it from the queue. + # Return None if no more tokens. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + else: + return None + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == '\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == '%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start?
+ if ch == '-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == '.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == '\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == '[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == '{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == ']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? + if ch == '}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == ',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == '-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == '?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == ':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == '*': + return self.fetch_alias() + + # Is it an anchor? + if ch == '&': + return self.fetch_anchor() + + # Is it a tag? + if ch == '!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == '|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == '>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == '\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == '\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" % ch, + self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in list(self.possible_simple_keys): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. 
This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # The next token might be a simple key. Let's save its number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. + if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid indentation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive than the specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current indentation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The stream is finished. + self.done = True + + def fetch_directive(self): + + # Set the current indentation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current indentation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END.
+        start_mark = self.get_mark()
+        self.forward(3)
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_start(self):
+        self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+    def fetch_flow_mapping_start(self):
+        self.fetch_flow_collection_start(FlowMappingStartToken)
+
+    def fetch_flow_collection_start(self, TokenClass):
+
+        # '[' and '{' may start a simple key.
+        self.save_possible_simple_key()
+
+        # Increase the flow level.
+        self.flow_level += 1
+
+        # Simple keys are allowed after '[' and '{'.
+        self.allow_simple_key = True
+
+        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_end(self):
+        self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+    def fetch_flow_mapping_end(self):
+        self.fetch_flow_collection_end(FlowMappingEndToken)
+
+    def fetch_flow_collection_end(self, TokenClass):
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Decrease the flow level.
+        self.flow_level -= 1
+
+        # No simple keys after ']' or '}'.
+        self.allow_simple_key = False
+
+        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_entry(self):
+
+        # Simple keys are allowed after ','.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add FLOW-ENTRY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+    def fetch_block_entry(self):
+
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a new entry?
+            if not self.allow_simple_key:
+                raise ScannerError(None, None,
+                        "sequence entries are not allowed here",
+                        self.get_mark())
+
+            # We may need to add BLOCK-SEQUENCE-START.
+            if self.add_indent(self.column):
+                mark = self.get_mark()
+                self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+        # It's an error for the block entry to occur in the flow context,
+        # but we let the parser detect this.
+        else:
+            pass
+
+        # Simple keys are allowed after '-'.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add BLOCK-ENTRY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+    def fetch_key(self):
+
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+            if not self.allow_simple_key:
+                raise ScannerError(None, None,
+                        "mapping keys are not allowed here",
+                        self.get_mark())
+
+            # We may need to add BLOCK-MAPPING-START.
+            if self.add_indent(self.column):
+                mark = self.get_mark()
+                self.tokens.append(BlockMappingStartToken(mark, mark))
+
+        # Simple keys are allowed after '?' in the block context.
+        self.allow_simple_key = not self.flow_level
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add KEY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(KeyToken(start_mark, end_mark))
+
+    def fetch_value(self):
+
+        # Do we determine a simple key?
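+        # (A "simple" key is one written without the explicit '?' indicator,
+        # e.g. `a` in `a: 1`. It is only recognized as a key once its ':' is
+        # seen, so the KEY token is inserted retroactively at the position
+        # saved in `possible_simple_keys`.)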
+ if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be caught by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. May change `allow_simple_key`. 
+        self.tokens.append(self.scan_plain())
+
+    # Checkers.
+
+    def check_directive(self):
+
+        # DIRECTIVE: ^ '%' ...
+        # The '%' indicator is already checked.
+        if self.column == 0:
+            return True
+
+    def check_document_start(self):
+
+        # DOCUMENT-START: ^ '---' (' '|'\n')
+        if self.column == 0:
+            if self.prefix(3) == '---' \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                return True
+
+    def check_document_end(self):
+
+        # DOCUMENT-END: ^ '...' (' '|'\n')
+        if self.column == 0:
+            if self.prefix(3) == '...' \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                return True
+
+    def check_block_entry(self):
+
+        # BLOCK-ENTRY: '-' (' '|'\n')
+        return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+    def check_key(self):
+
+        # KEY(flow context): '?'
+        if self.flow_level:
+            return True
+
+        # KEY(block context): '?' (' '|'\n')
+        else:
+            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+    def check_value(self):
+
+        # VALUE(flow context): ':'
+        if self.flow_level:
+            return True
+
+        # VALUE(block context): ':' (' '|'\n')
+        else:
+            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+    def check_plain(self):
+
+        # A plain scalar may start with any non-space character except:
+        #   '-', '?', ':', ',', '[', ']', '{', '}',
+        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
+        #   '%', '@', '`'.
+        #
+        # It may also start with
+        #   '-', '?', ':'
+        # if it is followed by a non-space character.
+        #
+        # Note that we limit the last rule to the block context (except the
+        # '-' character) because we want the flow context to be space
+        # independent.
+        ch = self.peek()
+        return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+                or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+                        and (ch == '-' or (not self.flow_level and ch in '?:')))
+
+    # Scanners.
+
+    def scan_to_next_token(self):
+        # We ignore spaces, line breaks and comments.
+        # If we find a line break in the block context, we set the flag
+        # `allow_simple_key` on.
+        # The byte order mark is stripped if it's the first character in the
+        # stream. We do not yet support BOM inside the stream as the
+        # specification requires. Any such mark will be considered as a part
+        # of the document.
+        #
+        # TODO: We need to make tab handling rules more sane. A good rule is
+        #   Tabs cannot precede tokens
+        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+        #   KEY(block), VALUE(block), BLOCK-ENTRY
+        # So the checking code is
+        #   if <TAB>:
+        #       self.allow_simple_keys = False
+        # We also need to add the check for `allow_simple_keys == True` to
+        # `unwind_indent` before issuing BLOCK-END.
+        # Scanners for block, flow, and plain scalars need to be modified.
+
+        if self.index == 0 and self.peek() == '\uFEFF':
+            self.forward()
+        found = False
+        while not found:
+            while self.peek() == ' ':
+                self.forward()
+            if self.peek() == '#':
+                while self.peek() not in '\0\r\n\x85\u2028\u2029':
+                    self.forward()
+            if self.scan_line_break():
+                if not self.flow_level:
+                    self.allow_simple_key = True
+            else:
+                found = True
+
+    def scan_directive(self):
+        # See the specification for details.
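+        # (A directive line looks like '%YAML 1.1' or
+        # '%TAG !e! tag:example.com,2000:'.)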
+ start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == 'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == 'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. + length = 0 + ch = self.peek(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" % self.peek(), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" % self.peek(), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not ('0' <= ch <= '9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch, self.get_mark()) + length = 0 + while '0' <= self.peek(length) <= '9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == ' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != ' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch, self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch, self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == ' ': + self.forward() + if self.peek() == '#': + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in '\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch, self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpreted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. + start_mark = self.get_mark() + indicator = self.peek() + if indicator == '*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == '<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != '>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek(), + self.get_mark()) + self.forward() + elif ch in '\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = '!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in '\0 \r\n\x85\u2028\u2029': + if ch == '!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = '!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = '!' + self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch, self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = '' + + # Scan the inner part of the block scalar. 
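+        # (For example, with the folded style '>' the two lines "a" and "b"
+        # scan to the single line "a b\n", while the literal style '|'
+        # preserves the break and yields "a\nb\n".)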
+ while self.column == indent and self.peek() != '\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in ' \t' + length = 0 + while self.peek(length) not in '\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != '\0': + + # Unfortunately, folding rules are ambiguous. + # + # This is the folding according to the specification: + + if folded and line_break == '\n' \ + and leading_non_space and self.peek() not in ' \t': + if not breaks: + chunks.append(' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == '\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch, self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + if self.peek() == '#': + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in '\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" % ch, + self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in ' \r\n\x85\u2028\u2029': + if self.peek() != ' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. 
+        chunks = []
+        end_mark = self.get_mark()
+        while self.column < indent and self.peek() == ' ':
+            self.forward()
+        while self.peek() in '\r\n\x85\u2028\u2029':
+            chunks.append(self.scan_line_break())
+            end_mark = self.get_mark()
+            while self.column < indent and self.peek() == ' ':
+                self.forward()
+        return chunks, end_mark
+
+    def scan_flow_scalar(self, style):
+        # See the specification for details.
+        # Note that we relax the indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark the beginning and the end of them. Therefore we are less
+        # restrictive than the specification requires. We only need to check
+        # that document separators are not included in scalars.
+        if style == '"':
+            double = True
+        else:
+            double = False
+        chunks = []
+        start_mark = self.get_mark()
+        quote = self.peek()
+        self.forward()
+        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        while self.peek() != quote:
+            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        self.forward()
+        end_mark = self.get_mark()
+        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+                style)
+
+    ESCAPE_REPLACEMENTS = {
+        '0':    '\0',
+        'a':    '\x07',
+        'b':    '\x08',
+        't':    '\x09',
+        '\t':   '\x09',
+        'n':    '\x0A',
+        'v':    '\x0B',
+        'f':    '\x0C',
+        'r':    '\x0D',
+        'e':    '\x1B',
+        ' ':    '\x20',
+        '\"':   '\"',
+        '\\':   '\\',
+        '/':    '/',
+        'N':    '\x85',
+        '_':    '\xA0',
+        'L':    '\u2028',
+        'P':    '\u2029',
+    }
+
+    ESCAPE_CODES = {
+        'x':    2,
+        'u':    4,
+        'U':    8,
+    }
+
+    def scan_flow_scalar_non_spaces(self, double, start_mark):
+        # See the specification for details.
+        chunks = []
+        while True:
+            length = 0
+            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+                length += 1
+            if length:
+                chunks.append(self.prefix(length))
+                self.forward(length)
+            ch = self.peek()
+            if not double and ch == '\'' and self.peek(1) == '\'':
+                chunks.append('\'')
+                self.forward(2)
+            elif (double and ch == '\'') or (not double and ch in '\"\\'):
+                chunks.append(ch)
+                self.forward()
+            elif double and ch == '\\':
+                self.forward()
+                ch = self.peek()
+                if ch in self.ESCAPE_REPLACEMENTS:
+                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+                    self.forward()
+                elif ch in self.ESCAPE_CODES:
+                    length = self.ESCAPE_CODES[ch]
+                    self.forward()
+                    for k in range(length):
+                        if self.peek(k) not in '0123456789ABCDEFabcdef':
+                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                                    "expected escape sequence of %d hexadecimal numbers, but found %r" %
+                                        (length, self.peek(k)), self.get_mark())
+                    code = int(self.prefix(length), 16)
+                    chunks.append(chr(code))
+                    self.forward(length)
+                elif ch in '\r\n\x85\u2028\u2029':
+                    self.scan_line_break()
+                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+                else:
+                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                            "found unknown escape character %r" % ch, self.get_mark())
+            else:
+                return chunks
+
+    def scan_flow_scalar_spaces(self, double, start_mark):
+        # See the specification for details.
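+        # (Inside a quoted scalar a single line break folds to one space,
+        # while each additional empty line is kept as a '\n'; that is what
+        # the `line_break`/`breaks` bookkeeping below implements.)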
+        chunks = []
+        length = 0
+        while self.peek(length) in ' \t':
+            length += 1
+        whitespaces = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch == '\0':
+            raise ScannerError("while scanning a quoted scalar", start_mark,
+                    "found unexpected end of stream", self.get_mark())
+        elif ch in '\r\n\x85\u2028\u2029':
+            line_break = self.scan_line_break()
+            breaks = self.scan_flow_scalar_breaks(double, start_mark)
+            if line_break != '\n':
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(' ')
+            chunks.extend(breaks)
+        else:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_flow_scalar_breaks(self, double, start_mark):
+        # See the specification for details.
+        chunks = []
+        while True:
+            # Instead of checking indentation, we check for document
+            # separators.
+            prefix = self.prefix(3)
+            if (prefix == '---' or prefix == '...') \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                raise ScannerError("while scanning a quoted scalar", start_mark,
+                        "found unexpected document separator", self.get_mark())
+            while self.peek() in ' \t':
+                self.forward()
+            if self.peek() in '\r\n\x85\u2028\u2029':
+                chunks.append(self.scan_line_break())
+            else:
+                return chunks
+
+    def scan_plain(self):
+        # See the specification for details.
+        # We add an additional restriction for the flow context:
+        #   plain scalars in the flow context cannot contain ',' or '?'.
+        # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are relaxed for the flow context.
+        chunks = []
+        start_mark = self.get_mark()
+        end_mark = start_mark
+        indent = self.indent+1
+        # We allow zero indentation for scalars, but then we need to check for
+        # document separators at the beginning of the line.
+        #if indent == 0:
+        #    indent = 1
+        spaces = []
+        while True:
+            length = 0
+            if self.peek() == '#':
+                break
+            while True:
+                ch = self.peek(length)
+                if ch in '\0 \t\r\n\x85\u2028\u2029' \
+                        or (ch == ':' and
+                                self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029'
+                                      + (u',[]{}' if self.flow_level else u''))\
+                        or (self.flow_level and ch in ',?[]{}'):
+                    break
+                length += 1
+            if length == 0:
+                break
+            self.allow_simple_key = False
+            chunks.extend(spaces)
+            chunks.append(self.prefix(length))
+            self.forward(length)
+            end_mark = self.get_mark()
+            spaces = self.scan_plain_spaces(indent, start_mark)
+            if not spaces or self.peek() == '#' \
+                    or (not self.flow_level and self.column < indent):
+                break
+        return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+    def scan_plain_spaces(self, indent, start_mark):
+        # See the specification for details.
+        # The specification is really confusing about tabs in plain scalars.
+        # We just forbid them completely. Do not use tabs in YAML!
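+        # (A bare '---' or '...' at the start of a line also terminates a
+        # plain scalar: in that case this helper returns None and the caller
+        # stops collecting.)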
+ chunks = [] + length = 0 + while self.peek(length) in ' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in '\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == '---' or prefix == '...') \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in ' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == '---' or prefix == '...') \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + return + if line_break != '\n': + chunks.append(line_break) + elif not breaks: + chunks.append(' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != '!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch, self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != ' ': + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_': + length += 1 + ch = self.peek(length) + if ch != '!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch, self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. + chunks = [] + length = 0 + ch = self.peek(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-;/?:@&=+$,_.!~*\'()[]%': + if ch == '%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch, self.get_mark()) + return ''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. 
+        codes = []
+        mark = self.get_mark()
+        while self.peek() == '%':
+            self.forward()
+            for k in range(2):
+                if self.peek(k) not in '0123456789ABCDEFabcdef':
+                    raise ScannerError("while scanning a %s" % name, start_mark,
+                            "expected URI escape sequence of 2 hexadecimal numbers, but found %r"
+                            % self.peek(k), self.get_mark())
+            codes.append(int(self.prefix(2), 16))
+            self.forward(2)
+        try:
+            value = bytes(codes).decode('utf-8')
+        except UnicodeDecodeError as exc:
+            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+        return value
+
+    def scan_line_break(self):
+        # Transforms:
+        #   '\r\n'      :   '\n'
+        #   '\r'        :   '\n'
+        #   '\n'        :   '\n'
+        #   '\x85'      :   '\n'
+        #   '\u2028'    :   '\u2028'
+        #   '\u2029'    :   '\u2029'
+        #   default     :   ''
+        ch = self.peek()
+        if ch in '\r\n\x85':
+            if self.prefix(2) == '\r\n':
+                self.forward(2)
+            else:
+                self.forward()
+            return '\n'
+        elif ch in '\u2028\u2029':
+            self.forward()
+            return ch
+        return ''
diff --git a/hackaton/lib/python3.12/site-packages/yaml/serializer.py b/hackaton/lib/python3.12/site-packages/yaml/serializer.py
new file mode 100644
index 0000000..fe911e6
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+    pass
+
+class Serializer:
+
+    ANCHOR_TEMPLATE = 'id%03d'
+
+    def __init__(self, encoding=None,
+            explicit_start=None, explicit_end=None, version=None, tags=None):
+        self.use_encoding = encoding
+        self.use_explicit_start = explicit_start
+        self.use_explicit_end = explicit_end
+        self.use_version = version
+        self.use_tags = tags
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+        self.closed = None
+
+    def open(self):
+        if self.closed is None:
+            self.emit(StreamStartEvent(encoding=self.use_encoding))
+            self.closed = False
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        else:
+            raise SerializerError("serializer is already opened")
+
+    def close(self):
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif not self.closed:
+            self.emit(StreamEndEvent())
+            self.closed = True
+
+    #def __del__(self):
+    #    self.close()
+
+    def serialize(self, node):
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+            version=self.use_version, tags=self.use_tags))
+        self.anchor_node(node)
+        self.serialize_node(node, None, None)
+        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+
+    def anchor_node(self, node):
+        if node in self.anchors:
+            if self.anchors[node] is None:
+                self.anchors[node] = self.generate_anchor(node)
+        else:
+            self.anchors[node] = None
+            if isinstance(node, SequenceNode):
+                for item in node.value:
+                    self.anchor_node(item)
+            elif isinstance(node, MappingNode):
+                for key, value in node.value:
+                    self.anchor_node(key)
+                    self.anchor_node(value)
+
+    def generate_anchor(self, node):
+        self.last_anchor_id += 1
+        return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+    def serialize_node(self, node, parent, index):
+        alias = self.anchors[node]
+        if node in self.serialized_nodes:
+            self.emit(AliasEvent(alias))
+        else:
+            self.serialized_nodes[node] = True
+            self.descend_resolver(parent, index)
+            if isinstance(node, ScalarNode):
+                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+                default_tag = self.resolve(ScalarNode, node.value, (False, True))
+                implicit = (node.tag == detected_tag), (node.tag == default_tag)
+                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+                    style=node.style))
+            elif isinstance(node, SequenceNode):
+                implicit = (node.tag
+                            == self.resolve(SequenceNode, node.value, True))
+                self.emit(SequenceStartEvent(alias, node.tag, implicit,
+                    flow_style=node.flow_style))
+                index = 0
+                for item in node.value:
+                    self.serialize_node(item, node, index)
+                    index += 1
+                self.emit(SequenceEndEvent())
+            elif isinstance(node, MappingNode):
+                implicit = (node.tag
+                            == self.resolve(MappingNode, node.value, True))
+                self.emit(MappingStartEvent(alias, node.tag, implicit,
+                    flow_style=node.flow_style))
+                for key, value in node.value:
+                    self.serialize_node(key, node, None)
+                    self.serialize_node(value, node, key)
+                self.emit(MappingEndEvent())
+        self.ascend_resolver()
+
diff --git a/hackaton/lib/python3.12/site-packages/yaml/tokens.py b/hackaton/lib/python3.12/site-packages/yaml/tokens.py
new file mode 100644
index 0000000..4d0b48a
--- /dev/null
+++ b/hackaton/lib/python3.12/site-packages/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+    def __init__(self, start_mark, end_mark):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+    def __repr__(self):
+        attributes = [key for key in self.__dict__
+                if not key.endswith('_mark')]
+        attributes.sort()
+        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+                for key in attributes])
+        return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+#    id = '<byte order mark>'
+
+class DirectiveToken(Token):
+    id = '<directive>'
+    def __init__(self, name, value, start_mark, end_mark):
+        self.name = name
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+    id = '<document start>'
+
+class DocumentEndToken(Token):
+    id = '<document end>'
+
+class StreamStartToken(Token):
+    id = '<stream start>'
+    def __init__(self, start_mark=None, end_mark=None,
+            encoding=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.encoding = encoding
+
+class StreamEndToken(Token):
+    id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+    id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+    id = '<block mapping start>'
+
+class BlockEndToken(Token):
+    id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+    id = '['
+
+class FlowMappingStartToken(Token):
+    id = '{'
+
+class FlowSequenceEndToken(Token):
+    id = ']'
+
+class FlowMappingEndToken(Token):
+    id = '}'
+
+class KeyToken(Token):
+    id = '?'
+
+class ValueToken(Token):
+    id = ':'
+
+class BlockEntryToken(Token):
+    id = '-'
+
+class FlowEntryToken(Token):
+    id = ','
+
+class AliasToken(Token):
+    id = '<alias>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class AnchorToken(Token):
+    id = '<anchor>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class TagToken(Token):
+    id = '<tag>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class ScalarToken(Token):
+    id = '<scalar>'
+    def __init__(self, value, plain, start_mark, end_mark, style=None):
+        self.value = value
+        self.plain = plain
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.style = style
+
diff --git a/main.py b/main.py
index aa93730..ef123d9 100644
--- a/main.py
+++ b/main.py
@@ -1,9 +1,65 @@
 from fastapi import FastAPI, WebSocket, WebSocketDisconnect
+import speech_recognition as sr
+from pydub import AudioSegment
+from fastapi.middleware.cors import CORSMiddleware
+import asyncio
+import json
+from stt import get_audio
+AudioSegment.converter = "/opt/homebrew/bin/ffmpeg"  # set this to the actual ffmpeg install path
 from fastapi.staticfiles import StaticFiles
 from tts import speak
 
 app = FastAPI()
+# CORS configuration
+origins = [
+    "http://localhost:5173",  # frontend running locally
+    "http://127.0.0.1:3000",  # additional local address
+]
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+@app.get("/speech-to-text")
+async def stt():
+    return get_audio()
+
+@app.websocket("/ws/speech-to-text/")
+async def websocket_speech_to_text(websocket: WebSocket):
+    await websocket.accept()
+    listening = False  # whether speech recognition is currently running
+
+    while True:
+        try:
+            # receive a message from the client
+            message = await websocket.receive_text()
+
+            if message == "start" and not listening:
+                listening = True
+                await websocket.send_text("Listening started...")
+
+                # run speech recognition until the client sends "stop"
+                while listening:
+                    # get_audio() blocks on the microphone, so run it in a
+                    # worker thread to keep the event loop responsive
+                    result = await asyncio.to_thread(get_audio)
+                    await websocket.send_text(json.dumps(result, ensure_ascii=False))  # send as a JSON string
+                    # poll briefly for a pending "stop" message
+                    try:
+                        message = await asyncio.wait_for(websocket.receive_text(), timeout=0.1)
+                        if message == "stop":
+                            listening = False
+                            await websocket.send_text("Listening stopped...")
+                    except asyncio.TimeoutError:
+                        pass
+
+            elif message == "stop":
+                listening = False
+                await websocket.send_text("Listening stopped...")
+
+        except WebSocketDisconnect:
+            print("Client disconnected")
+            break
+        except Exception as e:
+            await websocket.send_text("")
+            break
 # static file serving (handles plain HTTP requests)
 app.mount("/static", StaticFiles(directory=".", html=True), name="static")
diff --git a/requirements.txt b/requirements.txt
index 3d259b8..e47d874 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,11 +6,15 @@ click==8.1.7
 fastapi==0.115.3
 gTTS==2.5.3
 h11==0.14.0
+httptools==0.6.4
 idna==3.10
 playsound==1.2.2
 PyAudio==0.2.14
 pydantic==2.9.2
 pydantic_core==2.23.4
+pydub==0.25.1
+python-dotenv==1.0.1
+PyYAML==6.0.2
 requests==2.32.3
 sniffio==1.3.1
 SpeechRecognition==3.11.0
@@ -18,3 +22,6 @@ starlette==0.41.0
 typing_extensions==4.12.2
 urllib3==2.2.3
 uvicorn==0.32.0
+uvloop==0.21.0
+watchfiles==0.24.0
+websockets==13.1
diff --git a/stt.py b/stt.py
index 6f580bb..c25ddc8 100644
--- a/stt.py
+++ b/stt.py
@@ -1,5 +1,6 @@
 import speech_recognition as sr
 
+# speech recognition helper
 def get_audio():
     r = sr.Recognizer()
     with sr.Microphone() as source:
@@ -12,6 +13,6 @@ def get_audio():
         said = r.recognize_google(audio, language="ko-KR")
         return {"text": said}
     except sr.UnknownValueError:
{"error": "Could not understand audio"} + return {"text": ""} # 인식 실패 시 빈 문자열 반환 except sr.RequestError as e: - return {"error": f"API request failed: {e}"} + return {"error": f"API request failed: {e}"} \ No newline at end of file