From 9e8b1cdbe3e9fed86d0d3685521b35c8986edd68 Mon Sep 17 00:00:00 2001 From: Andrew Halberstadt Date: Tue, 18 Jul 2023 19:19:16 +0000 Subject: [PATCH] Bug 1839891 - Update vendored taskcluster-taskgraph to 5.6.1, r=taskgraph-reviewers,gbrown Differential Revision: https://phabricator.services.mozilla.com/D181899 --- python/sites/mach.txt | 7 + .../arrow/arrow-1.2.3.dist-info/LICENSE | 201 + .../arrow/arrow-1.2.3.dist-info/METADATA | 163 + .../python/arrow/arrow-1.2.3.dist-info/RECORD | 16 + .../arrow-1.2.3.dist-info}/WHEEL | 0 .../arrow/arrow-1.2.3.dist-info/top_level.txt | 1 + third_party/python/arrow/arrow/__init__.py | 39 + third_party/python/arrow/arrow/_version.py | 1 + third_party/python/arrow/arrow/api.py | 126 + third_party/python/arrow/arrow/arrow.py | 1886 +++++ third_party/python/arrow/arrow/constants.py | 177 + third_party/python/arrow/arrow/factory.py | 348 + third_party/python/arrow/arrow/formatter.py | 152 + third_party/python/arrow/arrow/locales.py | 6475 +++++++++++++++++ third_party/python/arrow/arrow/parser.py | 779 ++ third_party/python/arrow/arrow/py.typed | 0 third_party/python/arrow/arrow/util.py | 117 + .../DESCRIPTION.rst | 152 + .../binaryornot-0.4.4.dist-info/METADATA | 175 + .../binaryornot-0.4.4.dist-info/RECORD | 9 + .../binaryornot-0.4.4.dist-info/WHEEL | 6 + .../binaryornot-0.4.4.dist-info/metadata.json | 1 + .../binaryornot-0.4.4.dist-info/top_level.txt | 1 + .../binaryornot/binaryornot/__init__.py | 3 + .../python/binaryornot/binaryornot/check.py | 33 + .../python/binaryornot/binaryornot/helpers.py | 132 + .../cookiecutter-2.1.1.dist-info/AUTHORS.md | 215 + .../cookiecutter-2.1.1.dist-info/LICENSE | 32 + .../cookiecutter-2.1.1.dist-info/METADATA | 256 + .../cookiecutter-2.1.1.dist-info/RECORD | 25 + .../cookiecutter-2.1.1.dist-info/WHEEL | 6 + .../entry_points.txt | 2 + .../top_level.txt | 1 + .../cookiecutter/cookiecutter/__init__.py | 2 + .../cookiecutter/cookiecutter/__main__.py | 6 + .../python/cookiecutter/cookiecutter/cli.py 
| 231 + .../cookiecutter/cookiecutter/config.py | 122 + .../cookiecutter/cookiecutter/environment.py | 65 + .../cookiecutter/cookiecutter/exceptions.py | 163 + .../cookiecutter/cookiecutter/extensions.py | 66 + .../python/cookiecutter/cookiecutter/find.py | 31 + .../cookiecutter/cookiecutter/generate.py | 391 + .../python/cookiecutter/cookiecutter/hooks.py | 131 + .../python/cookiecutter/cookiecutter/log.py | 51 + .../python/cookiecutter/cookiecutter/main.py | 140 + .../cookiecutter/cookiecutter/prompt.py | 236 + .../cookiecutter/cookiecutter/replay.py | 52 + .../cookiecutter/cookiecutter/repository.py | 130 + .../python/cookiecutter/cookiecutter/utils.py | 120 + .../python/cookiecutter/cookiecutter/vcs.py | 125 + .../cookiecutter/cookiecutter/zipfile.py | 112 + .../DESCRIPTION.rst | 147 + .../jinja2_time-0.2.0.dist-info/METADATA | 174 + .../jinja2_time-0.2.0.dist-info/RECORD | 8 + .../jinja2_time-0.2.0.dist-info/WHEEL | 6 + .../jinja2_time-0.2.0.dist-info/metadata.json | 1 + .../jinja2_time-0.2.0.dist-info/top_level.txt | 1 + .../jinja2_time/jinja2_time/__init__.py | 10 + .../jinja2_time/jinja2_time/jinja2_time.py | 65 + third_party/python/poetry.lock | 123 +- .../python_dateutil/dateutil/__init__.py | 8 + .../python_dateutil/dateutil/_common.py | 43 + .../python_dateutil/dateutil/_version.py | 5 + .../python/python_dateutil/dateutil/easter.py | 89 + .../dateutil/parser/__init__.py | 61 + .../dateutil/parser/_parser.py | 1613 ++++ .../dateutil/parser/isoparser.py | 416 ++ .../python_dateutil/dateutil/relativedelta.py | 599 ++ .../python/python_dateutil/dateutil/rrule.py | 1737 +++++ .../python_dateutil/dateutil/tz/__init__.py | 12 + .../python_dateutil/dateutil/tz/_common.py | 419 ++ .../python_dateutil/dateutil/tz/_factories.py | 80 + .../python/python_dateutil/dateutil/tz/tz.py | 1849 +++++ .../python/python_dateutil/dateutil/tz/win.py | 370 + .../python/python_dateutil/dateutil/tzwin.py | 2 + .../python/python_dateutil/dateutil/utils.py | 71 + 
.../dateutil/zoneinfo/__init__.py | 167 + .../dateutil/zoneinfo/rebuild.py | 75 + .../python_dateutil-2.8.2.dist-info/LICENSE | 54 + .../python_dateutil-2.8.2.dist-info/METADATA | 204 + .../python_dateutil-2.8.2.dist-info/RECORD | 25 + .../python_dateutil-2.8.2.dist-info/WHEEL | 6 + .../top_level.txt | 1 + .../python_dateutil-2.8.2.dist-info/zip-safe | 1 + .../python_slugify-8.0.1.dist-info/LICENSE | 21 + .../python_slugify-8.0.1.dist-info/METADATA | 247 + .../python_slugify-8.0.1.dist-info/RECORD | 11 + .../python_slugify-8.0.1.dist-info/WHEEL | 6 + .../entry_points.txt | 3 + .../top_level.txt | 1 + .../python/python_slugify/slugify/__init__.py | 10 + .../python/python_slugify/slugify/__main__.py | 96 + .../python_slugify/slugify/__version__.py | 8 + .../python/python_slugify/slugify/slugify.py | 177 + .../python/python_slugify/slugify/special.py | 47 + third_party/python/requirements.in | 2 +- third_party/python/requirements.txt | 27 +- .../RECORD | 74 - .../LICENSE | 0 .../METADATA | 10 +- .../RECORD | 105 + .../WHEEL | 5 + .../entry_points.txt | 1 - .../top_level.txt | 0 .../taskgraph/__init__.py | 1 + .../taskgraph/actions/add_new_jobs.py | 2 +- .../taskgraph/actions/cancel_all.py | 2 +- .../taskgraph/actions/rebuild_cached_tasks.py | 36 + .../taskgraph/actions/retrigger.py | 2 +- .../taskgraph/actions/util.py | 4 +- .../taskcluster_taskgraph/taskgraph/config.py | 20 +- .../taskcluster_taskgraph/taskgraph/create.py | 2 +- .../taskgraph/decision.py | 4 +- .../taskcluster_taskgraph/taskgraph/docker.py | 1 - .../taskgraph/generator.py | 33 +- .../taskcluster_taskgraph/taskgraph/graph.py | 10 +- .../taskgraph/loader/default.py | 33 + .../taskcluster_taskgraph/taskgraph/main.py | 116 +- .../taskcluster_taskgraph/taskgraph/morph.py | 28 +- .../taskgraph/optimize.py | 471 ++ .../taskgraph/optimize/strategies.py | 1 - .../taskgraph/run-task/fetch-content | 10 +- .../taskgraph/run-task/robustcheckout.py | 44 +- .../taskgraph/run-task/run-task | 66 +- 
.../taskcluster_taskgraph/taskgraph/task.py | 28 +- .../taskgraph/taskgraph.py | 12 +- .../taskgraph/transforms/__init__.py | 3 + .../taskgraph/transforms/base.py | 53 +- .../taskgraph/transforms/docker_image.py | 5 +- .../taskgraph/transforms/fetch.py | 9 +- .../taskgraph/transforms/from_deps.py | 188 + .../taskgraph/transforms/job/__init__.py | 1 + .../taskgraph/transforms/job/common.py | 25 - .../taskgraph/transforms/job/run_task.py | 71 +- .../taskgraph/transforms/job/toolchain.py | 5 +- .../taskgraph/transforms/notify.py | 195 + .../taskgraph/transforms/task.py | 261 +- .../taskgraph/util/attributes.py | 28 +- .../taskgraph/util/dependencies.py | 89 + .../taskgraph/util/hash.py | 20 +- .../taskgraph/util/path.py | 11 +- .../taskgraph/util/treeherder.py | 20 + .../taskgraph/util/vcs.py | 23 +- .../taskgraph/util/verify.py | 22 +- .../taskgraph/util/workertypes.py | 17 +- .../DESCRIPTION.rst | 46 + .../text_unidecode-1.3.dist-info/LICENSE.txt | 134 + .../text_unidecode-1.3.dist-info/METADATA | 73 + .../text_unidecode-1.3.dist-info/RECORD | 9 + .../text_unidecode-1.3.dist-info/WHEEL | 6 + .../metadata.json | 1 + .../top_level.txt | 1 + .../text_unidecode/text_unidecode/__init__.py | 21 + .../text_unidecode/text_unidecode/data.bin | Bin 0 -> 311077 bytes 154 files changed, 24849 insertions(+), 418 deletions(-) create mode 100644 third_party/python/arrow/arrow-1.2.3.dist-info/LICENSE create mode 100644 third_party/python/arrow/arrow-1.2.3.dist-info/METADATA create mode 100644 third_party/python/arrow/arrow-1.2.3.dist-info/RECORD rename third_party/python/{taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info => arrow/arrow-1.2.3.dist-info}/WHEEL (100%) create mode 100644 third_party/python/arrow/arrow-1.2.3.dist-info/top_level.txt create mode 100644 third_party/python/arrow/arrow/__init__.py create mode 100644 third_party/python/arrow/arrow/_version.py create mode 100644 third_party/python/arrow/arrow/api.py create mode 100644 
third_party/python/arrow/arrow/arrow.py create mode 100644 third_party/python/arrow/arrow/constants.py create mode 100644 third_party/python/arrow/arrow/factory.py create mode 100644 third_party/python/arrow/arrow/formatter.py create mode 100644 third_party/python/arrow/arrow/locales.py create mode 100644 third_party/python/arrow/arrow/parser.py create mode 100644 third_party/python/arrow/arrow/py.typed create mode 100644 third_party/python/arrow/arrow/util.py create mode 100644 third_party/python/binaryornot/binaryornot-0.4.4.dist-info/DESCRIPTION.rst create mode 100644 third_party/python/binaryornot/binaryornot-0.4.4.dist-info/METADATA create mode 100644 third_party/python/binaryornot/binaryornot-0.4.4.dist-info/RECORD create mode 100644 third_party/python/binaryornot/binaryornot-0.4.4.dist-info/WHEEL create mode 100644 third_party/python/binaryornot/binaryornot-0.4.4.dist-info/metadata.json create mode 100644 third_party/python/binaryornot/binaryornot-0.4.4.dist-info/top_level.txt create mode 100644 third_party/python/binaryornot/binaryornot/__init__.py create mode 100644 third_party/python/binaryornot/binaryornot/check.py create mode 100644 third_party/python/binaryornot/binaryornot/helpers.py create mode 100644 third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/AUTHORS.md create mode 100644 third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/LICENSE create mode 100644 third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/METADATA create mode 100644 third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/RECORD create mode 100644 third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/WHEEL create mode 100644 third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/entry_points.txt create mode 100644 third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/top_level.txt create mode 100644 third_party/python/cookiecutter/cookiecutter/__init__.py create mode 100644 
third_party/python/cookiecutter/cookiecutter/__main__.py create mode 100644 third_party/python/cookiecutter/cookiecutter/cli.py create mode 100644 third_party/python/cookiecutter/cookiecutter/config.py create mode 100644 third_party/python/cookiecutter/cookiecutter/environment.py create mode 100644 third_party/python/cookiecutter/cookiecutter/exceptions.py create mode 100644 third_party/python/cookiecutter/cookiecutter/extensions.py create mode 100644 third_party/python/cookiecutter/cookiecutter/find.py create mode 100644 third_party/python/cookiecutter/cookiecutter/generate.py create mode 100644 third_party/python/cookiecutter/cookiecutter/hooks.py create mode 100644 third_party/python/cookiecutter/cookiecutter/log.py create mode 100644 third_party/python/cookiecutter/cookiecutter/main.py create mode 100644 third_party/python/cookiecutter/cookiecutter/prompt.py create mode 100644 third_party/python/cookiecutter/cookiecutter/replay.py create mode 100644 third_party/python/cookiecutter/cookiecutter/repository.py create mode 100644 third_party/python/cookiecutter/cookiecutter/utils.py create mode 100644 third_party/python/cookiecutter/cookiecutter/vcs.py create mode 100644 third_party/python/cookiecutter/cookiecutter/zipfile.py create mode 100644 third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/DESCRIPTION.rst create mode 100644 third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/METADATA create mode 100644 third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/RECORD create mode 100644 third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/WHEEL create mode 100644 third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/metadata.json create mode 100644 third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/top_level.txt create mode 100644 third_party/python/jinja2_time/jinja2_time/__init__.py create mode 100644 third_party/python/jinja2_time/jinja2_time/jinja2_time.py create mode 100644 
third_party/python/python_dateutil/dateutil/__init__.py create mode 100644 third_party/python/python_dateutil/dateutil/_common.py create mode 100644 third_party/python/python_dateutil/dateutil/_version.py create mode 100644 third_party/python/python_dateutil/dateutil/easter.py create mode 100644 third_party/python/python_dateutil/dateutil/parser/__init__.py create mode 100644 third_party/python/python_dateutil/dateutil/parser/_parser.py create mode 100644 third_party/python/python_dateutil/dateutil/parser/isoparser.py create mode 100644 third_party/python/python_dateutil/dateutil/relativedelta.py create mode 100644 third_party/python/python_dateutil/dateutil/rrule.py create mode 100644 third_party/python/python_dateutil/dateutil/tz/__init__.py create mode 100644 third_party/python/python_dateutil/dateutil/tz/_common.py create mode 100644 third_party/python/python_dateutil/dateutil/tz/_factories.py create mode 100644 third_party/python/python_dateutil/dateutil/tz/tz.py create mode 100644 third_party/python/python_dateutil/dateutil/tz/win.py create mode 100644 third_party/python/python_dateutil/dateutil/tzwin.py create mode 100644 third_party/python/python_dateutil/dateutil/utils.py create mode 100644 third_party/python/python_dateutil/dateutil/zoneinfo/__init__.py create mode 100644 third_party/python/python_dateutil/dateutil/zoneinfo/rebuild.py create mode 100644 third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/LICENSE create mode 100644 third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/METADATA create mode 100644 third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/RECORD create mode 100644 third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/WHEEL create mode 100644 third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/top_level.txt create mode 100644 third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/zip-safe create mode 100644 
third_party/python/python_slugify/python_slugify-8.0.1.dist-info/LICENSE create mode 100644 third_party/python/python_slugify/python_slugify-8.0.1.dist-info/METADATA create mode 100644 third_party/python/python_slugify/python_slugify-8.0.1.dist-info/RECORD create mode 100644 third_party/python/python_slugify/python_slugify-8.0.1.dist-info/WHEEL create mode 100644 third_party/python/python_slugify/python_slugify-8.0.1.dist-info/entry_points.txt create mode 100644 third_party/python/python_slugify/python_slugify-8.0.1.dist-info/top_level.txt create mode 100644 third_party/python/python_slugify/slugify/__init__.py create mode 100644 third_party/python/python_slugify/slugify/__main__.py create mode 100644 third_party/python/python_slugify/slugify/__version__.py create mode 100644 third_party/python/python_slugify/slugify/slugify.py create mode 100644 third_party/python/python_slugify/slugify/special.py delete mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/RECORD rename third_party/python/taskcluster_taskgraph/{taskcluster_taskgraph-3.5.2.dist-info => taskcluster_taskgraph-5.6.1.dist-info}/LICENSE (100%) rename third_party/python/taskcluster_taskgraph/{taskcluster_taskgraph-3.5.2.dist-info => taskcluster_taskgraph-5.6.1.dist-info}/METADATA (87%) create mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/RECORD create mode 100644 third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/WHEEL rename third_party/python/taskcluster_taskgraph/{taskcluster_taskgraph-3.5.2.dist-info => taskcluster_taskgraph-5.6.1.dist-info}/entry_points.txt (98%) rename third_party/python/taskcluster_taskgraph/{taskcluster_taskgraph-3.5.2.dist-info => taskcluster_taskgraph-5.6.1.dist-info}/top_level.txt (100%) create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/actions/rebuild_cached_tasks.py create mode 100644 
third_party/python/taskcluster_taskgraph/taskgraph/loader/default.py create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/optimize.py create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/from_deps.py create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/transforms/notify.py create mode 100644 third_party/python/taskcluster_taskgraph/taskgraph/util/dependencies.py create mode 100644 third_party/python/text_unidecode/text_unidecode-1.3.dist-info/DESCRIPTION.rst create mode 100644 third_party/python/text_unidecode/text_unidecode-1.3.dist-info/LICENSE.txt create mode 100644 third_party/python/text_unidecode/text_unidecode-1.3.dist-info/METADATA create mode 100644 third_party/python/text_unidecode/text_unidecode-1.3.dist-info/RECORD create mode 100644 third_party/python/text_unidecode/text_unidecode-1.3.dist-info/WHEEL create mode 100644 third_party/python/text_unidecode/text_unidecode-1.3.dist-info/metadata.json create mode 100644 third_party/python/text_unidecode/text_unidecode-1.3.dist-info/top_level.txt create mode 100644 third_party/python/text_unidecode/text_unidecode/__init__.py create mode 100644 third_party/python/text_unidecode/text_unidecode/data.bin diff --git a/python/sites/mach.txt b/python/sites/mach.txt index 3b410a0a88761..c7100b92f982b 100644 --- a/python/sites/mach.txt +++ b/python/sites/mach.txt @@ -59,15 +59,18 @@ pth:testing/xpcshell vendored:third_party/python/aiohttp vendored:third_party/python/ansicon vendored:third_party/python/appdirs +vendored:third_party/python/arrow vendored:third_party/python/async_timeout vendored:third_party/python/attrs vendored:third_party/python/blessed +vendored:third_party/python/binaryornot vendored:third_party/python/cbor2 vendored:third_party/python/certifi vendored:third_party/python/chardet vendored:third_party/python/click vendored:third_party/python/colorama vendored:third_party/python/compare_locales +vendored:third_party/python/cookiecutter 
vendored:third_party/python/cookies vendored:third_party/python/cram vendored:third_party/python/diskcache @@ -84,6 +87,7 @@ vendored:third_party/python/idna vendored:third_party/python/importlib_metadata vendored:third_party/python/importlib_resources vendored:third_party/python/Jinja2 +vendored:third_party/python/jinja2_time vendored:third_party/python/jinxed vendored:third_party/python/jsmin vendored:third_party/python/json-e @@ -106,6 +110,8 @@ vendored:third_party/python/pylru vendored:third_party/python/pyparsing vendored:third_party/python/pyrsistent vendored:third_party/python/python-hglib +vendored:third_party/python/python_dateutil +vendored:third_party/python/python_slugify vendored:third_party/python/PyYAML/lib vendored:third_party/python/redo vendored:third_party/python/requests @@ -119,6 +125,7 @@ vendored:third_party/python/slugid vendored:third_party/python/taskcluster vendored:third_party/python/taskcluster_taskgraph vendored:third_party/python/taskcluster_urls +vendored:third_party/python/text_unidecode vendored:third_party/python/toml vendored:third_party/python/tqdm vendored:third_party/python/typing_extensions diff --git a/third_party/python/arrow/arrow-1.2.3.dist-info/LICENSE b/third_party/python/arrow/arrow-1.2.3.dist-info/LICENSE new file mode 100644 index 0000000000000..4f9eea5d14930 --- /dev/null +++ b/third_party/python/arrow/arrow-1.2.3.dist-info/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 Chris Smith + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third_party/python/arrow/arrow-1.2.3.dist-info/METADATA b/third_party/python/arrow/arrow-1.2.3.dist-info/METADATA new file mode 100644 index 0000000000000..b3f89db320916 --- /dev/null +++ b/third_party/python/arrow/arrow-1.2.3.dist-info/METADATA @@ -0,0 +1,163 @@ +Metadata-Version: 2.1 +Name: arrow +Version: 1.2.3 +Summary: Better dates & times for Python +Home-page: https://arrow.readthedocs.io +Author: Chris Smith +Author-email: crsmithdev@gmail.com +License: Apache 2.0 +Project-URL: Repository, https://github.com/arrow-py/arrow +Project-URL: Bug Reports, https://github.com/arrow-py/arrow/issues +Project-URL: Documentation, https://arrow.readthedocs.io +Keywords: arrow date time datetime timestamp timezone humanize +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: python-dateutil (>=2.7.0) +Requires-Dist: typing-extensions ; python_version < "3.8" + +Arrow: Better dates & times for Python +====================================== + +.. start-inclusion-marker-do-not-remove + +.. image:: https://github.com/arrow-py/arrow/workflows/tests/badge.svg?branch=master + :alt: Build Status + :target: https://github.com/arrow-py/arrow/actions?query=workflow%3Atests+branch%3Amaster + +.. 
image:: https://codecov.io/gh/arrow-py/arrow/branch/master/graph/badge.svg + :alt: Coverage + :target: https://codecov.io/gh/arrow-py/arrow + +.. image:: https://img.shields.io/pypi/v/arrow.svg + :alt: PyPI Version + :target: https://pypi.python.org/pypi/arrow + +.. image:: https://img.shields.io/pypi/pyversions/arrow.svg + :alt: Supported Python Versions + :target: https://pypi.python.org/pypi/arrow + +.. image:: https://img.shields.io/pypi/l/arrow.svg + :alt: License + :target: https://pypi.python.org/pypi/arrow + +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :alt: Code Style: Black + :target: https://github.com/psf/black + + +**Arrow** is a Python library that offers a sensible and human-friendly approach to creating, manipulating, formatting and converting dates, times and timestamps. It implements and updates the datetime type, plugging gaps in functionality and providing an intelligent module API that supports many common creation scenarios. Simply put, it helps you work with dates and times with fewer imports and a lot less code. + +Arrow is named after the `arrow of time `_ and is heavily inspired by `moment.js `_ and `requests `_. + +Why use Arrow over built-in modules? +------------------------------------ + +Python's standard library and some other low-level modules have near-complete date, time and timezone functionality, but don't work very well from a usability perspective: + +- Too many modules: datetime, time, calendar, dateutil, pytz and more +- Too many types: date, time, datetime, tzinfo, timedelta, relativedelta, etc. 
+- Timezones and timestamp conversions are verbose and unpleasant +- Timezone naivety is the norm +- Gaps in functionality: ISO 8601 parsing, timespans, humanization + +Features +-------- + +- Fully-implemented, drop-in replacement for datetime +- Support for Python 3.6+ +- Timezone-aware and UTC by default +- Super-simple creation options for many common input scenarios +- ``shift`` method with support for relative offsets, including weeks +- Format and parse strings automatically +- Wide support for the `ISO 8601 `_ standard +- Timezone conversion +- Support for ``dateutil``, ``pytz``, and ``ZoneInfo`` tzinfo objects +- Generates time spans, ranges, floors and ceilings for time frames ranging from microsecond to year +- Humanize dates and times with a growing list of contributed locales +- Extensible for your own Arrow-derived types +- Full support for PEP 484-style type hints + +Quick Start +----------- + +Installation +~~~~~~~~~~~~ + +To install Arrow, use `pip `_ or `pipenv `_: + +.. code-block:: console + + $ pip install -U arrow + +Example Usage +~~~~~~~~~~~~~ + +.. code-block:: python + + >>> import arrow + >>> arrow.get('2013-05-11T21:23:58.970460+07:00') + + + >>> utc = arrow.utcnow() + >>> utc + + + >>> utc = utc.shift(hours=-1) + >>> utc + + + >>> local = utc.to('US/Pacific') + >>> local + + + >>> local.timestamp() + 1368303838.970460 + + >>> local.format() + '2013-05-11 13:23:58 -07:00' + + >>> local.format('YYYY-MM-DD HH:mm:ss ZZ') + '2013-05-11 13:23:58 -07:00' + + >>> local.humanize() + 'an hour ago' + + >>> local.humanize(locale='ko-kr') + '한시간 전' + +.. end-inclusion-marker-do-not-remove + +Documentation +------------- + +For full documentation, please visit `arrow.readthedocs.io `_. + +Contributing +------------ + +Contributions are welcome for both code and localizations (adding and updating locales). Begin by gaining familiarity with the Arrow library and its features. Then, jump into contributing: + +#. 
Find an issue or feature to tackle on the `issue tracker `_. Issues marked with the `"good first issue" label `_ may be a great place to start! +#. Fork `this repository `_ on GitHub and begin making changes in a branch. +#. Add a few tests to ensure that the bug was fixed or the feature works as expected. +#. Run the entire test suite and linting checks by running one of the following commands: ``tox && tox -e lint,docs`` (if you have `tox `_ installed) **OR** ``make build39 && make test && make lint`` (if you do not have Python 3.9 installed, replace ``build39`` with the latest Python version on your system). +#. Submit a pull request and await feedback 😃. + +If you have any questions along the way, feel free to ask them `here `_. + +Support Arrow +------------- + +`Open Collective `_ is an online funding platform that provides tools to raise money and share your finances with full transparency. It is the platform of choice for individuals and companies to make one-time or recurring donations directly to the project. If you are interested in making a financial contribution, please visit the `Arrow collective `_. 
diff --git a/third_party/python/arrow/arrow-1.2.3.dist-info/RECORD b/third_party/python/arrow/arrow-1.2.3.dist-info/RECORD new file mode 100644 index 0000000000000..62e73a16f59eb --- /dev/null +++ b/third_party/python/arrow/arrow-1.2.3.dist-info/RECORD @@ -0,0 +1,16 @@ +arrow/__init__.py,sha256=HxsSJGl56GoeHB__No-kdGmC_Wes_Ttf0ohOy7OoFig,872 +arrow/_version.py,sha256=C-D_WWrVkBDmQmApLcm0sWNh2CgIrwWfc8_sB5vvU-Q,22 +arrow/api.py,sha256=6tdqrG0NjrKO22_eWHU4a5xerfR6IrZPY-yynGpnvTM,2755 +arrow/arrow.py,sha256=CnSXk3GCi1DroUvElSxlwQy9Y-2lCUSV5GKLLrBFmRA,63570 +arrow/constants.py,sha256=y3scgWgxiFuQg4DeFlhmexy1BA7K8LFNZyqK-VWPQJs,3238 +arrow/factory.py,sha256=dWP3XIYfYjqp7DCOdEYAD7PQfsbpQE70Ph9OS1A1LnE,11435 +arrow/formatter.py,sha256=YpYY8jeGZH0sgjc23PBm8HKf-EMHLp-8Ua52XfrVgPQ,5271 +arrow/locales.py,sha256=QSi6FJTVdmxDxAUIDMhUp3sJ13tHhF2tB50fA_mve0I,156276 +arrow/parser.py,sha256=ingY4axAO40kEYUL8MwqTIhFegCAVouDZk3c4YOs9aI,25720 +arrow/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +arrow/util.py,sha256=xnDevqRyNeYWbl3x-n_Tyo4cOgHcdgbxFECFsJ1XoEc,3679 +arrow-1.2.3.dist-info/LICENSE,sha256=QNbhJV1xUfXwQaUUcl08lP-owYgeWgwptr6pPwPi47s,11341 +arrow-1.2.3.dist-info/METADATA,sha256=gg8GFdfHjX15F_yMrLsluJwKUXTaQi5ECUvwBlZYe9o,6938 +arrow-1.2.3.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +arrow-1.2.3.dist-info/top_level.txt,sha256=aCBThK2RIB824ctI3l9i6z94l8UYpFF-BC4m3dDzFFo,6 +arrow-1.2.3.dist-info/RECORD,, diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/WHEEL b/third_party/python/arrow/arrow-1.2.3.dist-info/WHEEL similarity index 100% rename from third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/WHEEL rename to third_party/python/arrow/arrow-1.2.3.dist-info/WHEEL diff --git a/third_party/python/arrow/arrow-1.2.3.dist-info/top_level.txt b/third_party/python/arrow/arrow-1.2.3.dist-info/top_level.txt new file mode 100644 index 0000000000000..e2dc7471c204d --- /dev/null 
+++ b/third_party/python/arrow/arrow-1.2.3.dist-info/top_level.txt @@ -0,0 +1 @@ +arrow diff --git a/third_party/python/arrow/arrow/__init__.py b/third_party/python/arrow/arrow/__init__.py new file mode 100644 index 0000000000000..bc5970970ed19 --- /dev/null +++ b/third_party/python/arrow/arrow/__init__.py @@ -0,0 +1,39 @@ +from ._version import __version__ +from .api import get, now, utcnow +from .arrow import Arrow +from .factory import ArrowFactory +from .formatter import ( + FORMAT_ATOM, + FORMAT_COOKIE, + FORMAT_RFC822, + FORMAT_RFC850, + FORMAT_RFC1036, + FORMAT_RFC1123, + FORMAT_RFC2822, + FORMAT_RFC3339, + FORMAT_RSS, + FORMAT_W3C, +) +from .parser import ParserError + +# https://mypy.readthedocs.io/en/stable/command_line.html#cmdoption-mypy-no-implicit-reexport +# Mypy with --strict or --no-implicit-reexport requires an explicit reexport. +__all__ = [ + "__version__", + "get", + "now", + "utcnow", + "Arrow", + "ArrowFactory", + "FORMAT_ATOM", + "FORMAT_COOKIE", + "FORMAT_RFC822", + "FORMAT_RFC850", + "FORMAT_RFC1036", + "FORMAT_RFC1123", + "FORMAT_RFC2822", + "FORMAT_RFC3339", + "FORMAT_RSS", + "FORMAT_W3C", + "ParserError", +] diff --git a/third_party/python/arrow/arrow/_version.py b/third_party/python/arrow/arrow/_version.py new file mode 100644 index 0000000000000..10aa336ce071b --- /dev/null +++ b/third_party/python/arrow/arrow/_version.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/third_party/python/arrow/arrow/api.py b/third_party/python/arrow/arrow/api.py new file mode 100644 index 0000000000000..d8ed24b978506 --- /dev/null +++ b/third_party/python/arrow/arrow/api.py @@ -0,0 +1,126 @@ +""" +Provides the default implementation of :class:`ArrowFactory ` +methods for use as a module API. 
+ +""" + +from datetime import date, datetime +from datetime import tzinfo as dt_tzinfo +from time import struct_time +from typing import Any, List, Optional, Tuple, Type, Union, overload + +from arrow.arrow import TZ_EXPR, Arrow +from arrow.constants import DEFAULT_LOCALE +from arrow.factory import ArrowFactory + +# internal default factory. +_factory = ArrowFactory() + +# TODO: Use Positional Only Argument (https://www.python.org/dev/peps/pep-0570/) +# after Python 3.7 deprecation + + +@overload +def get( + *, + locale: str = DEFAULT_LOCALE, + tzinfo: Optional[TZ_EXPR] = None, + normalize_whitespace: bool = False, +) -> Arrow: + ... # pragma: no cover + + +@overload +def get( + *args: int, + locale: str = DEFAULT_LOCALE, + tzinfo: Optional[TZ_EXPR] = None, + normalize_whitespace: bool = False, +) -> Arrow: + ... # pragma: no cover + + +@overload +def get( + __obj: Union[ + Arrow, + datetime, + date, + struct_time, + dt_tzinfo, + int, + float, + str, + Tuple[int, int, int], + ], + *, + locale: str = DEFAULT_LOCALE, + tzinfo: Optional[TZ_EXPR] = None, + normalize_whitespace: bool = False, +) -> Arrow: + ... # pragma: no cover + + +@overload +def get( + __arg1: Union[datetime, date], + __arg2: TZ_EXPR, + *, + locale: str = DEFAULT_LOCALE, + tzinfo: Optional[TZ_EXPR] = None, + normalize_whitespace: bool = False, +) -> Arrow: + ... # pragma: no cover + + +@overload +def get( + __arg1: str, + __arg2: Union[str, List[str]], + *, + locale: str = DEFAULT_LOCALE, + tzinfo: Optional[TZ_EXPR] = None, + normalize_whitespace: bool = False, +) -> Arrow: + ... 
# pragma: no cover + + +def get(*args: Any, **kwargs: Any) -> Arrow: + """Calls the default :class:`ArrowFactory ` ``get`` method.""" + + return _factory.get(*args, **kwargs) + + +get.__doc__ = _factory.get.__doc__ + + +def utcnow() -> Arrow: + """Calls the default :class:`ArrowFactory ` ``utcnow`` method.""" + + return _factory.utcnow() + + +utcnow.__doc__ = _factory.utcnow.__doc__ + + +def now(tz: Optional[TZ_EXPR] = None) -> Arrow: + """Calls the default :class:`ArrowFactory ` ``now`` method.""" + + return _factory.now(tz) + + +now.__doc__ = _factory.now.__doc__ + + +def factory(type: Type[Arrow]) -> ArrowFactory: + """Returns an :class:`.ArrowFactory` for the specified :class:`Arrow ` + or derived type. + + :param type: the type, :class:`Arrow ` or derived. + + """ + + return ArrowFactory(type) + + +__all__ = ["get", "utcnow", "now", "factory"] diff --git a/third_party/python/arrow/arrow/arrow.py b/third_party/python/arrow/arrow/arrow.py new file mode 100644 index 0000000000000..1ede107f56324 --- /dev/null +++ b/third_party/python/arrow/arrow/arrow.py @@ -0,0 +1,1886 @@ +""" +Provides the :class:`Arrow ` class, an enhanced ``datetime`` +replacement. 
+ +""" + + +import calendar +import re +import sys +from datetime import date +from datetime import datetime as dt_datetime +from datetime import time as dt_time +from datetime import timedelta +from datetime import tzinfo as dt_tzinfo +from math import trunc +from time import struct_time +from typing import ( + Any, + ClassVar, + Generator, + Iterable, + List, + Mapping, + Optional, + Tuple, + Union, + cast, + overload, +) + +from dateutil import tz as dateutil_tz +from dateutil.relativedelta import relativedelta + +from arrow import formatter, locales, parser, util +from arrow.constants import DEFAULT_LOCALE, DEHUMANIZE_LOCALES +from arrow.locales import TimeFrameLiteral + +if sys.version_info < (3, 8): # pragma: no cover + from typing_extensions import Final, Literal +else: + from typing import Final, Literal # pragma: no cover + + +TZ_EXPR = Union[dt_tzinfo, str] + +_T_FRAMES = Literal[ + "year", + "years", + "month", + "months", + "day", + "days", + "hour", + "hours", + "minute", + "minutes", + "second", + "seconds", + "microsecond", + "microseconds", + "week", + "weeks", + "quarter", + "quarters", +] + +_BOUNDS = Literal["[)", "()", "(]", "[]"] + +_GRANULARITY = Literal[ + "auto", + "second", + "minute", + "hour", + "day", + "week", + "month", + "quarter", + "year", +] + + +class Arrow: + """An :class:`Arrow ` object. + + Implements the ``datetime`` interface, behaving as an aware ``datetime`` while implementing + additional functionality. + + :param year: the calendar year. + :param month: the calendar month. + :param day: the calendar day. + :param hour: (optional) the hour. Defaults to 0. + :param minute: (optional) the minute, Defaults to 0. + :param second: (optional) the second, Defaults to 0. + :param microsecond: (optional) the microsecond. Defaults to 0. + :param tzinfo: (optional) A timezone expression. Defaults to UTC. + :param fold: (optional) 0 or 1, used to disambiguate repeated wall times. Defaults to 0. + + .. 
_tz-expr: + + Recognized timezone expressions: + + - A ``tzinfo`` object. + - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'. + - A ``str`` in ISO 8601 style, as in '+07:00'. + - A ``str``, one of the following: 'local', 'utc', 'UTC'. + + Usage:: + + >>> import arrow + >>> arrow.Arrow(2013, 5, 5, 12, 30, 45) + + + """ + + resolution: ClassVar[timedelta] = dt_datetime.resolution + min: ClassVar["Arrow"] + max: ClassVar["Arrow"] + + _ATTRS: Final[List[str]] = [ + "year", + "month", + "day", + "hour", + "minute", + "second", + "microsecond", + ] + _ATTRS_PLURAL: Final[List[str]] = [f"{a}s" for a in _ATTRS] + _MONTHS_PER_QUARTER: Final[int] = 3 + _SECS_PER_MINUTE: Final[int] = 60 + _SECS_PER_HOUR: Final[int] = 60 * 60 + _SECS_PER_DAY: Final[int] = 60 * 60 * 24 + _SECS_PER_WEEK: Final[int] = 60 * 60 * 24 * 7 + _SECS_PER_MONTH: Final[float] = 60 * 60 * 24 * 30.5 + _SECS_PER_QUARTER: Final[float] = 60 * 60 * 24 * 30.5 * 3 + _SECS_PER_YEAR: Final[int] = 60 * 60 * 24 * 365 + + _SECS_MAP: Final[Mapping[TimeFrameLiteral, float]] = { + "second": 1.0, + "minute": _SECS_PER_MINUTE, + "hour": _SECS_PER_HOUR, + "day": _SECS_PER_DAY, + "week": _SECS_PER_WEEK, + "month": _SECS_PER_MONTH, + "quarter": _SECS_PER_QUARTER, + "year": _SECS_PER_YEAR, + } + + _datetime: dt_datetime + + def __init__( + self, + year: int, + month: int, + day: int, + hour: int = 0, + minute: int = 0, + second: int = 0, + microsecond: int = 0, + tzinfo: Optional[TZ_EXPR] = None, + **kwargs: Any, + ) -> None: + if tzinfo is None: + tzinfo = dateutil_tz.tzutc() + # detect that tzinfo is a pytz object (issue #626) + elif ( + isinstance(tzinfo, dt_tzinfo) + and hasattr(tzinfo, "localize") + and hasattr(tzinfo, "zone") + and tzinfo.zone # type: ignore[attr-defined] + ): + tzinfo = parser.TzinfoParser.parse(tzinfo.zone) # type: ignore[attr-defined] + elif isinstance(tzinfo, str): + tzinfo = parser.TzinfoParser.parse(tzinfo) + + fold = kwargs.get("fold", 0) + + self._datetime = 
dt_datetime( + year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold + ) + + # factories: single object, both original and from datetime. + + @classmethod + def now(cls, tzinfo: Optional[dt_tzinfo] = None) -> "Arrow": + """Constructs an :class:`Arrow ` object, representing "now" in the given + timezone. + + :param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time. + + Usage:: + + >>> arrow.now('Asia/Baku') + + + """ + + if tzinfo is None: + tzinfo = dateutil_tz.tzlocal() + + dt = dt_datetime.now(tzinfo) + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def utcnow(cls) -> "Arrow": + """Constructs an :class:`Arrow ` object, representing "now" in UTC + time. + + Usage:: + + >>> arrow.utcnow() + + + """ + + dt = dt_datetime.now(dateutil_tz.tzutc()) + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def fromtimestamp( + cls, + timestamp: Union[int, float, str], + tzinfo: Optional[TZ_EXPR] = None, + ) -> "Arrow": + """Constructs an :class:`Arrow ` object from a timestamp, converted to + the given timezone. + + :param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either. + :param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time. 
+ + """ + + if tzinfo is None: + tzinfo = dateutil_tz.tzlocal() + elif isinstance(tzinfo, str): + tzinfo = parser.TzinfoParser.parse(tzinfo) + + if not util.is_timestamp(timestamp): + raise ValueError(f"The provided timestamp {timestamp!r} is invalid.") + + timestamp = util.normalize_timestamp(float(timestamp)) + dt = dt_datetime.fromtimestamp(timestamp, tzinfo) + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def utcfromtimestamp(cls, timestamp: Union[int, float, str]) -> "Arrow": + """Constructs an :class:`Arrow ` object from a timestamp, in UTC time. + + :param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either. + + """ + + if not util.is_timestamp(timestamp): + raise ValueError(f"The provided timestamp {timestamp!r} is invalid.") + + timestamp = util.normalize_timestamp(float(timestamp)) + dt = dt_datetime.utcfromtimestamp(timestamp) + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dateutil_tz.tzutc(), + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def fromdatetime(cls, dt: dt_datetime, tzinfo: Optional[TZ_EXPR] = None) -> "Arrow": + """Constructs an :class:`Arrow ` object from a ``datetime`` and + optional replacement timezone. + + :param dt: the ``datetime`` + :param tzinfo: (optional) A :ref:`timezone expression `. Defaults to ``dt``'s + timezone, or UTC if naive. 
+ + Usage:: + + >>> dt + datetime.datetime(2021, 4, 7, 13, 48, tzinfo=tzfile('/usr/share/zoneinfo/US/Pacific')) + >>> arrow.Arrow.fromdatetime(dt) + + + """ + + if tzinfo is None: + if dt.tzinfo is None: + tzinfo = dateutil_tz.tzutc() + else: + tzinfo = dt.tzinfo + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + tzinfo, + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def fromdate(cls, date: date, tzinfo: Optional[TZ_EXPR] = None) -> "Arrow": + """Constructs an :class:`Arrow ` object from a ``date`` and optional + replacement timezone. All time values are set to 0. + + :param date: the ``date`` + :param tzinfo: (optional) A :ref:`timezone expression `. Defaults to UTC. + + """ + + if tzinfo is None: + tzinfo = dateutil_tz.tzutc() + + return cls(date.year, date.month, date.day, tzinfo=tzinfo) + + @classmethod + def strptime( + cls, date_str: str, fmt: str, tzinfo: Optional[TZ_EXPR] = None + ) -> "Arrow": + """Constructs an :class:`Arrow ` object from a date string and format, + in the style of ``datetime.strptime``. Optionally replaces the parsed timezone. + + :param date_str: the date string. + :param fmt: the format string using datetime format codes. + :param tzinfo: (optional) A :ref:`timezone expression `. Defaults to the parsed + timezone if ``fmt`` contains a timezone directive, otherwise UTC. + + Usage:: + + >>> arrow.Arrow.strptime('20-01-2019 15:49:10', '%d-%m-%Y %H:%M:%S') + + + """ + + dt = dt_datetime.strptime(date_str, fmt) + if tzinfo is None: + tzinfo = dt.tzinfo + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + tzinfo, + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def fromordinal(cls, ordinal: int) -> "Arrow": + """Constructs an :class:`Arrow ` object corresponding + to the Gregorian Ordinal. + + :param ordinal: an ``int`` corresponding to a Gregorian Ordinal. 
+ + Usage:: + + >>> arrow.fromordinal(737741) + + + """ + + util.validate_ordinal(ordinal) + dt = dt_datetime.fromordinal(ordinal) + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + fold=getattr(dt, "fold", 0), + ) + + # factories: ranges and spans + + @classmethod + def range( + cls, + frame: _T_FRAMES, + start: Union["Arrow", dt_datetime], + end: Union["Arrow", dt_datetime, None] = None, + tz: Optional[TZ_EXPR] = None, + limit: Optional[int] = None, + ) -> Generator["Arrow", None, None]: + """Returns an iterator of :class:`Arrow ` objects, representing + points in time between two inputs. + + :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...). + :param start: A datetime expression, the start of the range. + :param end: (optional) A datetime expression, the end of the range. + :param tz: (optional) A :ref:`timezone expression `. Defaults to + ``start``'s timezone, or UTC if ``start`` is naive. + :param limit: (optional) A maximum number of tuples to return. + + **NOTE**: The ``end`` or ``limit`` must be provided. Call with ``end`` alone to + return the entire range. Call with ``limit`` alone to return a maximum # of results from + the start. Call with both to cap a range at a maximum # of results. + + **NOTE**: ``tz`` internally **replaces** the timezones of both ``start`` and ``end`` before + iterating. As such, either call with naive objects and ``tz``, or aware objects from the + same timezone and no ``tz``. + + Supported frame values: year, quarter, month, week, day, hour, minute, second, microsecond. + + Recognized datetime expressions: + + - An :class:`Arrow ` object. + - A ``datetime`` object. + + Usage:: + + >>> start = datetime(2013, 5, 5, 12, 30) + >>> end = datetime(2013, 5, 5, 17, 15) + >>> for r in arrow.Arrow.range('hour', start, end): + ... print(repr(r)) + ... 
+ + + + + + + **NOTE**: Unlike Python's ``range``, ``end`` *may* be included in the returned iterator:: + + >>> start = datetime(2013, 5, 5, 12, 30) + >>> end = datetime(2013, 5, 5, 13, 30) + >>> for r in arrow.Arrow.range('hour', start, end): + ... print(repr(r)) + ... + + + + """ + + _, frame_relative, relative_steps = cls._get_frames(frame) + + tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz) + + start = cls._get_datetime(start).replace(tzinfo=tzinfo) + end, limit = cls._get_iteration_params(end, limit) + end = cls._get_datetime(end).replace(tzinfo=tzinfo) + + current = cls.fromdatetime(start) + original_day = start.day + day_is_clipped = False + i = 0 + + while current <= end and i < limit: + i += 1 + yield current + + values = [getattr(current, f) for f in cls._ATTRS] + current = cls(*values, tzinfo=tzinfo).shift( # type: ignore + **{frame_relative: relative_steps} + ) + + if frame in ["month", "quarter", "year"] and current.day < original_day: + day_is_clipped = True + + if day_is_clipped and not cls._is_last_day_of_month(current): + current = current.replace(day=original_day) + + def span( + self, + frame: _T_FRAMES, + count: int = 1, + bounds: _BOUNDS = "[)", + exact: bool = False, + week_start: int = 1, + ) -> Tuple["Arrow", "Arrow"]: + """Returns a tuple of two new :class:`Arrow ` objects, representing the timespan + of the :class:`Arrow ` object in a given timeframe. + + :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). + :param count: (optional) the number of frames to span. + :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies + whether to include or exclude the start and end values in the span. '(' excludes + the start, '[' includes the start, ')' excludes the end, and ']' includes the end. + If the bounds are not specified, the default bound '[)' is used. 
+ :param exact: (optional) whether to have the start of the timespan begin exactly + at the time specified by ``start`` and the end of the timespan truncated + so as not to extend beyond ``end``. + :param week_start: (optional) only used in combination with the week timeframe. Follows isoweekday() where + Monday is 1 and Sunday is 7. + + Supported frame values: year, quarter, month, week, day, hour, minute, second. + + Usage:: + + >>> arrow.utcnow() + + + >>> arrow.utcnow().span('hour') + (, ) + + >>> arrow.utcnow().span('day') + (, ) + + >>> arrow.utcnow().span('day', count=2) + (, ) + + >>> arrow.utcnow().span('day', bounds='[]') + (, ) + + >>> arrow.utcnow().span('week') + (, ) + + >>> arrow.utcnow().span('week', week_start=6) + (, ) + + """ + if not 1 <= week_start <= 7: + raise ValueError("week_start argument must be between 1 and 7.") + + util.validate_bounds(bounds) + + frame_absolute, frame_relative, relative_steps = self._get_frames(frame) + + if frame_absolute == "week": + attr = "day" + elif frame_absolute == "quarter": + attr = "month" + else: + attr = frame_absolute + + floor = self + if not exact: + index = self._ATTRS.index(attr) + frames = self._ATTRS[: index + 1] + + values = [getattr(self, f) for f in frames] + + for _ in range(3 - len(values)): + values.append(1) + + floor = self.__class__(*values, tzinfo=self.tzinfo) # type: ignore + + if frame_absolute == "week": + # if week_start is greater than self.isoweekday() go back one week by setting delta = 7 + delta = 7 if week_start > self.isoweekday() else 0 + floor = floor.shift(days=-(self.isoweekday() - week_start) - delta) + elif frame_absolute == "quarter": + floor = floor.shift(months=-((self.month - 1) % 3)) + + ceil = floor.shift(**{frame_relative: count * relative_steps}) + + if bounds[0] == "(": + floor = floor.shift(microseconds=+1) + + if bounds[1] == ")": + ceil = ceil.shift(microseconds=-1) + + return floor, ceil + + def floor(self, frame: _T_FRAMES) -> "Arrow": + """Returns a new 
:class:`Arrow ` object, representing the "floor" + of the timespan of the :class:`Arrow ` object in a given timeframe. + Equivalent to the first element in the 2-tuple returned by + :func:`span `. + + :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). + + Usage:: + + >>> arrow.utcnow().floor('hour') + + + """ + + return self.span(frame)[0] + + def ceil(self, frame: _T_FRAMES) -> "Arrow": + """Returns a new :class:`Arrow ` object, representing the "ceiling" + of the timespan of the :class:`Arrow ` object in a given timeframe. + Equivalent to the second element in the 2-tuple returned by + :func:`span `. + + :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). + + Usage:: + + >>> arrow.utcnow().ceil('hour') + + + """ + + return self.span(frame)[1] + + @classmethod + def span_range( + cls, + frame: _T_FRAMES, + start: dt_datetime, + end: dt_datetime, + tz: Optional[TZ_EXPR] = None, + limit: Optional[int] = None, + bounds: _BOUNDS = "[)", + exact: bool = False, + ) -> Iterable[Tuple["Arrow", "Arrow"]]: + """Returns an iterator of tuples, each :class:`Arrow ` objects, + representing a series of timespans between two inputs. + + :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...). + :param start: A datetime expression, the start of the range. + :param end: (optional) A datetime expression, the end of the range. + :param tz: (optional) A :ref:`timezone expression `. Defaults to + ``start``'s timezone, or UTC if ``start`` is naive. + :param limit: (optional) A maximum number of tuples to return. + :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies + whether to include or exclude the start and end values in each span in the range. '(' excludes + the start, '[' includes the start, ')' excludes the end, and ']' includes the end. + If the bounds are not specified, the default bound '[)' is used. 
+ :param exact: (optional) whether to have the first timespan start exactly + at the time specified by ``start`` and the final span truncated + so as not to extend beyond ``end``. + + **NOTE**: The ``end`` or ``limit`` must be provided. Call with ``end`` alone to + return the entire range. Call with ``limit`` alone to return a maximum # of results from + the start. Call with both to cap a range at a maximum # of results. + + **NOTE**: ``tz`` internally **replaces** the timezones of both ``start`` and ``end`` before + iterating. As such, either call with naive objects and ``tz``, or aware objects from the + same timezone and no ``tz``. + + Supported frame values: year, quarter, month, week, day, hour, minute, second, microsecond. + + Recognized datetime expressions: + + - An :class:`Arrow ` object. + - A ``datetime`` object. + + **NOTE**: Unlike Python's ``range``, ``end`` will *always* be included in the returned + iterator of timespans. + + Usage: + + >>> start = datetime(2013, 5, 5, 12, 30) + >>> end = datetime(2013, 5, 5, 17, 15) + >>> for r in arrow.Arrow.span_range('hour', start, end): + ... print(r) + ... 
+ (, ) + (, ) + (, ) + (, ) + (, ) + (, ) + + """ + + tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz) + start = cls.fromdatetime(start, tzinfo).span(frame, exact=exact)[0] + end = cls.fromdatetime(end, tzinfo) + _range = cls.range(frame, start, end, tz, limit) + if not exact: + for r in _range: + yield r.span(frame, bounds=bounds, exact=exact) + + for r in _range: + floor, ceil = r.span(frame, bounds=bounds, exact=exact) + if ceil > end: + ceil = end + if bounds[1] == ")": + ceil += relativedelta(microseconds=-1) + if floor == end: + break + elif floor + relativedelta(microseconds=-1) == end: + break + yield floor, ceil + + @classmethod + def interval( + cls, + frame: _T_FRAMES, + start: dt_datetime, + end: dt_datetime, + interval: int = 1, + tz: Optional[TZ_EXPR] = None, + bounds: _BOUNDS = "[)", + exact: bool = False, + ) -> Iterable[Tuple["Arrow", "Arrow"]]: + """Returns an iterator of tuples, each :class:`Arrow ` objects, + representing a series of intervals between two inputs. + + :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...). + :param start: A datetime expression, the start of the range. + :param end: (optional) A datetime expression, the end of the range. + :param interval: (optional) Time interval for the given time frame. + :param tz: (optional) A timezone expression. Defaults to UTC. + :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies + whether to include or exclude the start and end values in the intervals. '(' excludes + the start, '[' includes the start, ')' excludes the end, and ']' includes the end. + If the bounds are not specified, the default bound '[)' is used. + :param exact: (optional) whether to have the first timespan start exactly + at the time specified by ``start`` and the final interval truncated + so as not to extend beyond ``end``. 
+ + Supported frame values: year, quarter, month, week, day, hour, minute, second + + Recognized datetime expressions: + + - An :class:`Arrow ` object. + - A ``datetime`` object. + + Recognized timezone expressions: + + - A ``tzinfo`` object. + - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'. + - A ``str`` in ISO 8601 style, as in '+07:00'. + - A ``str``, one of the following: 'local', 'utc', 'UTC'. + + Usage: + + >>> start = datetime(2013, 5, 5, 12, 30) + >>> end = datetime(2013, 5, 5, 17, 15) + >>> for r in arrow.Arrow.interval('hour', start, end, 2): + ... print(r) + ... + (, ) + (, ) + (, ) + """ + if interval < 1: + raise ValueError("interval has to be a positive integer") + + spanRange = iter( + cls.span_range(frame, start, end, tz, bounds=bounds, exact=exact) + ) + while True: + try: + intvlStart, intvlEnd = next(spanRange) + for _ in range(interval - 1): + try: + _, intvlEnd = next(spanRange) + except StopIteration: + continue + yield intvlStart, intvlEnd + except StopIteration: + return + + # representations + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.__str__()}]>" + + def __str__(self) -> str: + return self._datetime.isoformat() + + def __format__(self, formatstr: str) -> str: + + if len(formatstr) > 0: + return self.format(formatstr) + + return str(self) + + def __hash__(self) -> int: + return self._datetime.__hash__() + + # attributes and properties + + def __getattr__(self, name: str) -> int: + + if name == "week": + return self.isocalendar()[1] + + if name == "quarter": + return int((self.month - 1) / self._MONTHS_PER_QUARTER) + 1 + + if not name.startswith("_"): + value: Optional[int] = getattr(self._datetime, name, None) + + if value is not None: + return value + + return cast(int, object.__getattribute__(self, name)) + + @property + def tzinfo(self) -> dt_tzinfo: + """Gets the ``tzinfo`` of the :class:`Arrow ` object. 
+ + Usage:: + + >>> arw=arrow.utcnow() + >>> arw.tzinfo + tzutc() + + """ + + # In Arrow, `_datetime` cannot be naive. + return cast(dt_tzinfo, self._datetime.tzinfo) + + @property + def datetime(self) -> dt_datetime: + """Returns a datetime representation of the :class:`Arrow ` object. + + Usage:: + + >>> arw=arrow.utcnow() + >>> arw.datetime + datetime.datetime(2019, 1, 24, 16, 35, 27, 276649, tzinfo=tzutc()) + + """ + + return self._datetime + + @property + def naive(self) -> dt_datetime: + """Returns a naive datetime representation of the :class:`Arrow ` + object. + + Usage:: + + >>> nairobi = arrow.now('Africa/Nairobi') + >>> nairobi + + >>> nairobi.naive + datetime.datetime(2019, 1, 23, 19, 27, 12, 297999) + + """ + + return self._datetime.replace(tzinfo=None) + + def timestamp(self) -> float: + """Returns a timestamp representation of the :class:`Arrow ` object, in + UTC time. + + Usage:: + + >>> arrow.utcnow().timestamp() + 1616882340.256501 + + """ + + return self._datetime.timestamp() + + @property + def int_timestamp(self) -> int: + """Returns an integer timestamp representation of the :class:`Arrow ` object, in + UTC time. + + Usage:: + + >>> arrow.utcnow().int_timestamp + 1548260567 + + """ + + return int(self.timestamp()) + + @property + def float_timestamp(self) -> float: + """Returns a floating-point timestamp representation of the :class:`Arrow ` + object, in UTC time. + + Usage:: + + >>> arrow.utcnow().float_timestamp + 1548260516.830896 + + """ + + return self.timestamp() + + @property + def fold(self) -> int: + """Returns the ``fold`` value of the :class:`Arrow ` object.""" + + return self._datetime.fold + + @property + def ambiguous(self) -> bool: + """Indicates whether the :class:`Arrow ` object is a repeated wall time in the current + timezone. 
+ + """ + + return dateutil_tz.datetime_ambiguous(self._datetime) + + @property + def imaginary(self) -> bool: + """Indicates whether the :class: `Arrow ` object exists in the current timezone.""" + + return not dateutil_tz.datetime_exists(self._datetime) + + # mutation and duplication. + + def clone(self) -> "Arrow": + """Returns a new :class:`Arrow ` object, cloned from the current one. + + Usage: + + >>> arw = arrow.utcnow() + >>> cloned = arw.clone() + + """ + + return self.fromdatetime(self._datetime) + + def replace(self, **kwargs: Any) -> "Arrow": + """Returns a new :class:`Arrow ` object with attributes updated + according to inputs. + + Use property names to set their value absolutely:: + + >>> import arrow + >>> arw = arrow.utcnow() + >>> arw + + >>> arw.replace(year=2014, month=6) + + + You can also replace the timezone without conversion, using a + :ref:`timezone expression `:: + + >>> arw.replace(tzinfo=tz.tzlocal()) + + + """ + + absolute_kwargs = {} + + for key, value in kwargs.items(): + + if key in self._ATTRS: + absolute_kwargs[key] = value + elif key in ["week", "quarter"]: + raise ValueError(f"Setting absolute {key} is not supported.") + elif key not in ["tzinfo", "fold"]: + raise ValueError(f"Unknown attribute: {key!r}.") + + current = self._datetime.replace(**absolute_kwargs) + + tzinfo = kwargs.get("tzinfo") + + if tzinfo is not None: + tzinfo = self._get_tzinfo(tzinfo) + current = current.replace(tzinfo=tzinfo) + + fold = kwargs.get("fold") + + if fold is not None: + current = current.replace(fold=fold) + + return self.fromdatetime(current) + + def shift(self, **kwargs: Any) -> "Arrow": + """Returns a new :class:`Arrow ` object with attributes updated + according to inputs. 
+ + Use pluralized property names to relatively shift their current value: + + >>> import arrow + >>> arw = arrow.utcnow() + >>> arw + + >>> arw.shift(years=1, months=-1) + + + Day-of-the-week relative shifting can use either Python's weekday numbers + (Monday = 0, Tuesday = 1 .. Sunday = 6) or using dateutil.relativedelta's + day instances (MO, TU .. SU). When using weekday numbers, the returned + date will always be greater than or equal to the starting date. + + Using the above code (which is a Saturday) and asking it to shift to Saturday: + + >>> arw.shift(weekday=5) + + + While asking for a Monday: + + >>> arw.shift(weekday=0) + + + """ + + relative_kwargs = {} + additional_attrs = ["weeks", "quarters", "weekday"] + + for key, value in kwargs.items(): + + if key in self._ATTRS_PLURAL or key in additional_attrs: + relative_kwargs[key] = value + else: + supported_attr = ", ".join(self._ATTRS_PLURAL + additional_attrs) + raise ValueError( + f"Invalid shift time frame. Please select one of the following: {supported_attr}." + ) + + # core datetime does not support quarters, translate to months. + relative_kwargs.setdefault("months", 0) + relative_kwargs["months"] += ( + relative_kwargs.pop("quarters", 0) * self._MONTHS_PER_QUARTER + ) + + current = self._datetime + relativedelta(**relative_kwargs) + + if not dateutil_tz.datetime_exists(current): + current = dateutil_tz.resolve_imaginary(current) + + return self.fromdatetime(current) + + def to(self, tz: TZ_EXPR) -> "Arrow": + """Returns a new :class:`Arrow ` object, converted + to the target timezone. + + :param tz: A :ref:`timezone expression `. 
+ + Usage:: + + >>> utc = arrow.utcnow() + >>> utc + + + >>> utc.to('US/Pacific') + + + >>> utc.to(tz.tzlocal()) + + + >>> utc.to('-07:00') + + + >>> utc.to('local') + + + >>> utc.to('local').to('utc') + + + """ + + if not isinstance(tz, dt_tzinfo): + tz = parser.TzinfoParser.parse(tz) + + dt = self._datetime.astimezone(tz) + + return self.__class__( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + fold=getattr(dt, "fold", 0), + ) + + # string output and formatting + + def format( + self, fmt: str = "YYYY-MM-DD HH:mm:ssZZ", locale: str = DEFAULT_LOCALE + ) -> str: + """Returns a string representation of the :class:`Arrow ` object, + formatted according to the provided format string. + + :param fmt: the format string. + :param locale: the locale to format. + + Usage:: + + >>> arrow.utcnow().format('YYYY-MM-DD HH:mm:ss ZZ') + '2013-05-09 03:56:47 -00:00' + + >>> arrow.utcnow().format('X') + '1368071882' + + >>> arrow.utcnow().format('MMMM DD, YYYY') + 'May 09, 2013' + + >>> arrow.utcnow().format() + '2013-05-09 03:56:47 -00:00' + + """ + + return formatter.DateTimeFormatter(locale).format(self._datetime, fmt) + + def humanize( + self, + other: Union["Arrow", dt_datetime, None] = None, + locale: str = DEFAULT_LOCALE, + only_distance: bool = False, + granularity: Union[_GRANULARITY, List[_GRANULARITY]] = "auto", + ) -> str: + """Returns a localized, humanized representation of a relative difference in time. + + :param other: (optional) an :class:`Arrow ` or ``datetime`` object. + Defaults to now in the current :class:`Arrow ` object's timezone. + :param locale: (optional) a ``str`` specifying a locale. Defaults to 'en-us'. + :param only_distance: (optional) returns only time difference eg: "11 seconds" without "in" or "ago" part. + :param granularity: (optional) defines the precision of the output. 
    def humanize(
        self,
        other: Union["Arrow", dt_datetime, None] = None,
        locale: str = DEFAULT_LOCALE,
        only_distance: bool = False,
        granularity: Union[_GRANULARITY, List[_GRANULARITY]] = "auto",
    ) -> str:
        """Returns a localized, humanized representation of a relative difference in time.

        :param other: (optional) an :class:`Arrow <arrow.arrow.Arrow>` or ``datetime`` object.
            Defaults to now in the current :class:`Arrow <arrow.arrow.Arrow>` object's timezone.
        :param locale: (optional) a ``str`` specifying a locale.  Defaults to 'en-us'.
        :param only_distance: (optional) returns only time difference eg: "11 seconds" without "in" or "ago" part.
        :param granularity: (optional) defines the precision of the output. Set it to strings 'second', 'minute',
            'hour', 'day', 'week', 'month' or 'year' or a list of any combination of these strings

        :raises TypeError: if *other* is not ``None``, an Arrow, or a datetime.
        :raises ValueError: for an unknown granularity, an empty granularity
            list, or a frame not translated in the requested locale.

        Usage::

            >>> earlier = arrow.utcnow().shift(hours=-2)
            >>> earlier.humanize()
            '2 hours ago'

            >>> later = earlier.shift(hours=4)
            >>> later.humanize(earlier)
            'in 4 hours'

        """

        # Keep the caller's locale name for error messages; rebind `locale`
        # to the locale object itself for the rest of the method.
        locale_name = locale
        locale = locales.get_locale(locale)

        # Normalize `other` into an aware datetime in this Arrow's timezone.
        if other is None:
            utc = dt_datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc())
            dt = utc.astimezone(self._datetime.tzinfo)

        elif isinstance(other, Arrow):
            dt = other._datetime

        elif isinstance(other, dt_datetime):
            # A naive datetime is assumed to share this Arrow's timezone.
            if other.tzinfo is None:
                dt = other.replace(tzinfo=self._datetime.tzinfo)
            else:
                dt = other.astimezone(self._datetime.tzinfo)

        else:
            raise TypeError(
                f"Invalid 'other' argument of type {type(other).__name__!r}. "
                "Argument must be of type None, Arrow, or datetime."
            )

        # A single-element granularity list behaves like that single string.
        if isinstance(granularity, list) and len(granularity) == 1:
            granularity = granularity[0]

        # Signed whole-second difference; `sign` carries direction, the rest
        # of the method works with the absolute value.
        _delta = int(round((self._datetime - dt).total_seconds()))
        sign = -1 if _delta < 0 else 1
        delta_second = diff = abs(_delta)

        try:
            # --- "auto": pick the largest fitting frame via an ordered
            # threshold cascade.  Singular frames ("minute", "hour", ...) are
            # used up to 2x the frame size; beyond that, plural frames with a
            # count clamped to a minimum of 2.
            if granularity == "auto":
                if diff < 10:
                    return locale.describe("now", only_distance=only_distance)

                if diff < self._SECS_PER_MINUTE:
                    seconds = sign * delta_second
                    return locale.describe(
                        "seconds", seconds, only_distance=only_distance
                    )

                elif diff < self._SECS_PER_MINUTE * 2:
                    return locale.describe("minute", sign, only_distance=only_distance)
                elif diff < self._SECS_PER_HOUR:
                    minutes = sign * max(delta_second // self._SECS_PER_MINUTE, 2)
                    return locale.describe(
                        "minutes", minutes, only_distance=only_distance
                    )

                elif diff < self._SECS_PER_HOUR * 2:
                    return locale.describe("hour", sign, only_distance=only_distance)
                elif diff < self._SECS_PER_DAY:
                    hours = sign * max(delta_second // self._SECS_PER_HOUR, 2)
                    return locale.describe("hours", hours, only_distance=only_distance)
                elif diff < self._SECS_PER_DAY * 2:
                    return locale.describe("day", sign, only_distance=only_distance)
                elif diff < self._SECS_PER_WEEK:
                    days = sign * max(delta_second // self._SECS_PER_DAY, 2)
                    return locale.describe("days", days, only_distance=only_distance)

                elif diff < self._SECS_PER_WEEK * 2:
                    return locale.describe("week", sign, only_distance=only_distance)
                elif diff < self._SECS_PER_MONTH:
                    weeks = sign * max(delta_second // self._SECS_PER_WEEK, 2)
                    return locale.describe("weeks", weeks, only_distance=only_distance)

                elif diff < self._SECS_PER_MONTH * 2:
                    return locale.describe("month", sign, only_distance=only_distance)
                elif diff < self._SECS_PER_YEAR:
                    # Months are counted on the calendar, not by seconds.
                    # TODO revisit for humanization during leap years
                    self_months = self._datetime.year * 12 + self._datetime.month
                    other_months = dt.year * 12 + dt.month

                    months = sign * max(abs(other_months - self_months), 2)

                    return locale.describe(
                        "months", months, only_distance=only_distance
                    )

                elif diff < self._SECS_PER_YEAR * 2:
                    return locale.describe("year", sign, only_distance=only_distance)
                else:
                    years = sign * max(delta_second // self._SECS_PER_YEAR, 2)
                    return locale.describe("years", years, only_distance=only_distance)

            # --- single named frame: express the whole difference in that
            # frame (fractional), pluralizing unless the magnitude rounds to 1.
            elif isinstance(granularity, str):
                granularity = cast(TimeFrameLiteral, granularity)  # type: ignore[assignment]

                if granularity == "second":
                    delta = sign * float(delta_second)
                    if abs(delta) < 2:
                        return locale.describe("now", only_distance=only_distance)
                elif granularity == "minute":
                    delta = sign * delta_second / self._SECS_PER_MINUTE
                elif granularity == "hour":
                    delta = sign * delta_second / self._SECS_PER_HOUR
                elif granularity == "day":
                    delta = sign * delta_second / self._SECS_PER_DAY
                elif granularity == "week":
                    delta = sign * delta_second / self._SECS_PER_WEEK
                elif granularity == "month":
                    delta = sign * delta_second / self._SECS_PER_MONTH
                elif granularity == "quarter":
                    delta = sign * delta_second / self._SECS_PER_QUARTER
                elif granularity == "year":
                    delta = sign * delta_second / self._SECS_PER_YEAR
                else:
                    raise ValueError(
                        "Invalid level of granularity. "
                        "Please select between 'second', 'minute', 'hour', 'day', 'week', 'month', 'quarter' or 'year'."
                    )

                if trunc(abs(delta)) != 1:
                    granularity += "s"  # type: ignore
                return locale.describe(granularity, delta, only_distance=only_distance)

            # --- list of frames: decompose the difference largest-to-smallest
            # across the requested frames and describe them together.
            else:

                if not granularity:
                    raise ValueError(
                        "Empty granularity list provided. "
                        "Please select one or more from 'second', 'minute', 'hour', 'day', 'week', 'month', 'quarter', 'year'."
                    )

                timeframes: List[Tuple[TimeFrameLiteral, float]] = []

                def gather_timeframes(_delta: float, _frame: TimeFrameLiteral) -> float:
                    # Consume `_frame`'s share of the remaining delta, record
                    # it (pluralized unless it rounds to 1), and return what
                    # is left for the smaller frames.
                    if _frame in granularity:
                        value = sign * _delta / self._SECS_MAP[_frame]
                        _delta %= self._SECS_MAP[_frame]
                        if trunc(abs(value)) != 1:
                            timeframes.append(
                                (cast(TimeFrameLiteral, _frame + "s"), value)
                            )
                        else:
                            timeframes.append((_frame, value))
                    return _delta

                delta = float(delta_second)
                frames: Tuple[TimeFrameLiteral, ...] = (
                    "year",
                    "quarter",
                    "month",
                    "week",
                    "day",
                    "hour",
                    "minute",
                    "second",
                )
                for frame in frames:
                    delta = gather_timeframes(delta, frame)

                # Every requested frame must have been recognized above.
                if len(timeframes) < len(granularity):
                    raise ValueError(
                        "Invalid level of granularity. "
                        "Please select between 'second', 'minute', 'hour', 'day', 'week', 'month', 'quarter' or 'year'."
                    )

                return locale.describe_multi(timeframes, only_distance=only_distance)

        # locale.describe raises KeyError for frames the locale lacks.
        except KeyError as e:
            raise ValueError(
                f"Humanization of the {e} granularity is not currently translated in the {locale_name!r} locale. "
                "Please consider making a contribution to this locale."
            )
    def dehumanize(self, input_string: str, locale: str = "en_us") -> "Arrow":
        """Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, that represents
        the time difference relative to the attributes of the
        :class:`Arrow <arrow.arrow.Arrow>` object.

        :param input_string: a ``str`` representing a humanized relative time.
        :param locale: (optional) a ``str`` specifying a locale.  Defaults to 'en-us'.

        :raises ValueError: if the locale is unsupported, if no time unit is
            found in the string, or if the string carries no past/future/now
            direction.

        Usage::

                >>> arw = arrow.utcnow()
                >>> earlier = arw.dehumanize("2 days ago")
                >>> later = arw.dehumanize("in a month")

        """

        # Resolve the locale object used for unit and direction strings.
        locale_obj = locales.get_locale(locale)

        # Check to see if locale is supported
        normalized_locale_name = locale.lower().replace("_", "-")

        if normalized_locale_name not in DEHUMANIZE_LOCALES:
            raise ValueError(
                f"Dehumanize does not currently support the {locale} locale, please consider making a contribution to add support for this locale."
            )

        current_time = self.fromdatetime(self._datetime)

        # Accumulates the magnitude parsed for each plural time unit.
        time_object_info = dict.fromkeys(
            ["seconds", "minutes", "hours", "days", "weeks", "months", "years"], 0
        )

        # Tracks which units (including "now") matched in the input string.
        unit_visited = dict.fromkeys(
            ["now", "seconds", "minutes", "hours", "days", "weeks", "months", "years"],
            False,
        )

        # Pre-compiled pattern for extracting digits from a matched fragment.
        num_pattern = re.compile(r"\d+")

        # Search input string for each time unit within locale
        for unit, unit_object in locale_obj.timeframes.items():

            # A timeframe may be a single template or a mapping of
            # count-specific templates; normalize to a dict either way.
            if isinstance(unit_object, Mapping):
                strings_to_search = unit_object
            else:
                strings_to_search = {unit: str(unit_object)}

            # Search for any matches that exist for that locale's unit.
            # Needs to cycle all through strings as some locales have strings that
            # could overlap in a regex match, since input validation isn't being performed.
            for time_delta, time_string in strings_to_search.items():

                # Replace the {0} placeholder with \d+ so the template
                # matches any count in the input.
                search_string = str(time_string)
                search_string = search_string.format(r"\d+")

                # Anchor on start-of-string, a word boundary, or a digit.
                pattern = re.compile(rf"(^|\b|\d){search_string}")
                match = pattern.search(input_string)

                # If there is no match continue to next iteration
                if not match:
                    continue

                match_string = match.group()
                num_match = num_pattern.search(match_string)

                # If no number matched, fall back to the template's own key
                # (abs() because some locales embed signs in their keys).
                if not num_match:
                    change_value = (
                        1 if not time_delta.isnumeric() else abs(int(time_delta))
                    )
                else:
                    change_value = int(num_match.group())

                # "now" carries direction only — no magnitude to record.
                if unit == "now":
                    unit_visited[unit] = True
                    continue

                # Normalize singular unit names to plural keys
                # (e.g. "second" -> "seconds") before recording.
                time_unit_to_change = str(unit)
                time_unit_to_change += (
                    "s" if (str(time_unit_to_change)[-1] != "s") else ""
                )
                time_object_info[time_unit_to_change] = change_value
                unit_visited[time_unit_to_change] = True

        # At least one unit (or "now") must have matched the input.
        # NOTE(review): "granulairty" is a typo in this user-facing message;
        # left unchanged here since fixing it would alter runtime output.
        if not any([True for k, v in unit_visited.items() if v]):
            raise ValueError(
                "Input string not valid. Note: Some locales do not support the week granulairty in Arrow. "
                "If you are attempting to use the week granularity on an unsupported locale, this could be the cause of this error."
            )

        # Determine direction by matching the locale's future/past templates
        # against the whole input string.
        future_string = locale_obj.future
        future_string = future_string.format(".*")
        future_pattern = re.compile(rf"^{future_string}$")
        future_pattern_match = future_pattern.findall(input_string)

        past_string = locale_obj.past
        past_string = past_string.format(".*")
        past_pattern = re.compile(rf"^{past_string}$")
        past_pattern_match = past_pattern.findall(input_string)

        # If a string contains the now unit, there will be no relative units, hence the need to check if the now unit
        # was visited before raising a ValueError
        if past_pattern_match:
            sign_val = -1
        elif future_pattern_match:
            sign_val = 1
        elif unit_visited["now"]:
            sign_val = 0
        else:
            raise ValueError(
                "Invalid input String. String does not contain any relative time information. "
                "String should either represent a time in the future or a time in the past. "
                "Ex: 'in 5 seconds' or '5 seconds ago'."
            )

        # Apply the signed unit amounts as a relative shift.
        time_changes = {k: sign_val * v for k, v in time_object_info.items()}

        return current_time.shift(**time_changes)
+ + Usage:: + + >>> start = arrow.get(datetime(2013, 5, 5, 12, 30, 10)) + >>> end = arrow.get(datetime(2013, 5, 5, 12, 30, 36)) + >>> arrow.get(datetime(2013, 5, 5, 12, 30, 27)).is_between(start, end) + True + + >>> start = arrow.get(datetime(2013, 5, 5)) + >>> end = arrow.get(datetime(2013, 5, 8)) + >>> arrow.get(datetime(2013, 5, 8)).is_between(start, end, '[]') + True + + >>> start = arrow.get(datetime(2013, 5, 5)) + >>> end = arrow.get(datetime(2013, 5, 8)) + >>> arrow.get(datetime(2013, 5, 8)).is_between(start, end, '[)') + False + + """ + + util.validate_bounds(bounds) + + if not isinstance(start, Arrow): + raise TypeError( + f"Cannot parse start date argument type of {type(start)!r}." + ) + + if not isinstance(end, Arrow): + raise TypeError(f"Cannot parse end date argument type of {type(start)!r}.") + + include_start = bounds[0] == "[" + include_end = bounds[1] == "]" + + target_ts = self.float_timestamp + start_ts = start.float_timestamp + end_ts = end.float_timestamp + + return ( + (start_ts <= target_ts <= end_ts) + and (include_start or start_ts < target_ts) + and (include_end or target_ts < end_ts) + ) + + # datetime methods + + def date(self) -> date: + """Returns a ``date`` object with the same year, month and day. + + Usage:: + + >>> arrow.utcnow().date() + datetime.date(2019, 1, 23) + + """ + + return self._datetime.date() + + def time(self) -> dt_time: + """Returns a ``time`` object with the same hour, minute, second, microsecond. + + Usage:: + + >>> arrow.utcnow().time() + datetime.time(12, 15, 34, 68352) + + """ + + return self._datetime.time() + + def timetz(self) -> dt_time: + """Returns a ``time`` object with the same hour, minute, second, microsecond and + tzinfo. + + Usage:: + + >>> arrow.utcnow().timetz() + datetime.time(12, 5, 18, 298893, tzinfo=tzutc()) + + """ + + return self._datetime.timetz() + + def astimezone(self, tz: Optional[dt_tzinfo]) -> dt_datetime: + """Returns a ``datetime`` object, converted to the specified timezone. 
+ + :param tz: a ``tzinfo`` object. + + Usage:: + + >>> pacific=arrow.now('US/Pacific') + >>> nyc=arrow.now('America/New_York').tzinfo + >>> pacific.astimezone(nyc) + datetime.datetime(2019, 1, 20, 10, 24, 22, 328172, tzinfo=tzfile('/usr/share/zoneinfo/America/New_York')) + + """ + + return self._datetime.astimezone(tz) + + def utcoffset(self) -> Optional[timedelta]: + """Returns a ``timedelta`` object representing the whole number of minutes difference from + UTC time. + + Usage:: + + >>> arrow.now('US/Pacific').utcoffset() + datetime.timedelta(-1, 57600) + + """ + + return self._datetime.utcoffset() + + def dst(self) -> Optional[timedelta]: + """Returns the daylight savings time adjustment. + + Usage:: + + >>> arrow.utcnow().dst() + datetime.timedelta(0) + + """ + + return self._datetime.dst() + + def timetuple(self) -> struct_time: + """Returns a ``time.struct_time``, in the current timezone. + + Usage:: + + >>> arrow.utcnow().timetuple() + time.struct_time(tm_year=2019, tm_mon=1, tm_mday=20, tm_hour=15, tm_min=17, tm_sec=8, tm_wday=6, tm_yday=20, tm_isdst=0) + + """ + + return self._datetime.timetuple() + + def utctimetuple(self) -> struct_time: + """Returns a ``time.struct_time``, in UTC time. + + Usage:: + + >>> arrow.utcnow().utctimetuple() + time.struct_time(tm_year=2019, tm_mon=1, tm_mday=19, tm_hour=21, tm_min=41, tm_sec=7, tm_wday=5, tm_yday=19, tm_isdst=0) + + """ + + return self._datetime.utctimetuple() + + def toordinal(self) -> int: + """Returns the proleptic Gregorian ordinal of the date. + + Usage:: + + >>> arrow.utcnow().toordinal() + 737078 + + """ + + return self._datetime.toordinal() + + def weekday(self) -> int: + """Returns the day of the week as an integer (0-6). + + Usage:: + + >>> arrow.utcnow().weekday() + 5 + + """ + + return self._datetime.weekday() + + def isoweekday(self) -> int: + """Returns the ISO day of the week as an integer (1-7). 
+ + Usage:: + + >>> arrow.utcnow().isoweekday() + 6 + + """ + + return self._datetime.isoweekday() + + def isocalendar(self) -> Tuple[int, int, int]: + """Returns a 3-tuple, (ISO year, ISO week number, ISO weekday). + + Usage:: + + >>> arrow.utcnow().isocalendar() + (2019, 3, 6) + + """ + + return self._datetime.isocalendar() + + def isoformat(self, sep: str = "T", timespec: str = "auto") -> str: + """Returns an ISO 8601 formatted representation of the date and time. + + Usage:: + + >>> arrow.utcnow().isoformat() + '2019-01-19T18:30:52.442118+00:00' + + """ + + return self._datetime.isoformat(sep, timespec) + + def ctime(self) -> str: + """Returns a ctime formatted representation of the date and time. + + Usage:: + + >>> arrow.utcnow().ctime() + 'Sat Jan 19 18:26:50 2019' + + """ + + return self._datetime.ctime() + + def strftime(self, format: str) -> str: + """Formats in the style of ``datetime.strftime``. + + :param format: the format string. + + Usage:: + + >>> arrow.utcnow().strftime('%d-%m-%Y %H:%M:%S') + '23-01-2019 12:28:17' + + """ + + return self._datetime.strftime(format) + + def for_json(self) -> str: + """Serializes for the ``for_json`` protocol of simplejson. 
+ + Usage:: + + >>> arrow.utcnow().for_json() + '2019-01-19T18:25:36.760079+00:00' + + """ + + return self.isoformat() + + # math + + def __add__(self, other: Any) -> "Arrow": + + if isinstance(other, (timedelta, relativedelta)): + return self.fromdatetime(self._datetime + other, self._datetime.tzinfo) + + return NotImplemented + + def __radd__(self, other: Union[timedelta, relativedelta]) -> "Arrow": + return self.__add__(other) + + @overload + def __sub__(self, other: Union[timedelta, relativedelta]) -> "Arrow": + pass # pragma: no cover + + @overload + def __sub__(self, other: Union[dt_datetime, "Arrow"]) -> timedelta: + pass # pragma: no cover + + def __sub__(self, other: Any) -> Union[timedelta, "Arrow"]: + + if isinstance(other, (timedelta, relativedelta)): + return self.fromdatetime(self._datetime - other, self._datetime.tzinfo) + + elif isinstance(other, dt_datetime): + return self._datetime - other + + elif isinstance(other, Arrow): + return self._datetime - other._datetime + + return NotImplemented + + def __rsub__(self, other: Any) -> timedelta: + + if isinstance(other, dt_datetime): + return other - self._datetime + + return NotImplemented + + # comparisons + + def __eq__(self, other: Any) -> bool: + + if not isinstance(other, (Arrow, dt_datetime)): + return False + + return self._datetime == self._get_datetime(other) + + def __ne__(self, other: Any) -> bool: + + if not isinstance(other, (Arrow, dt_datetime)): + return True + + return not self.__eq__(other) + + def __gt__(self, other: Any) -> bool: + + if not isinstance(other, (Arrow, dt_datetime)): + return NotImplemented + + return self._datetime > self._get_datetime(other) + + def __ge__(self, other: Any) -> bool: + + if not isinstance(other, (Arrow, dt_datetime)): + return NotImplemented + + return self._datetime >= self._get_datetime(other) + + def __lt__(self, other: Any) -> bool: + + if not isinstance(other, (Arrow, dt_datetime)): + return NotImplemented + + return self._datetime < 
self._get_datetime(other) + + def __le__(self, other: Any) -> bool: + + if not isinstance(other, (Arrow, dt_datetime)): + return NotImplemented + + return self._datetime <= self._get_datetime(other) + + # internal methods + @staticmethod + def _get_tzinfo(tz_expr: Optional[TZ_EXPR]) -> dt_tzinfo: + """Get normalized tzinfo object from various inputs.""" + if tz_expr is None: + return dateutil_tz.tzutc() + if isinstance(tz_expr, dt_tzinfo): + return tz_expr + else: + try: + return parser.TzinfoParser.parse(tz_expr) + except parser.ParserError: + raise ValueError(f"{tz_expr!r} not recognized as a timezone.") + + @classmethod + def _get_datetime( + cls, expr: Union["Arrow", dt_datetime, int, float, str] + ) -> dt_datetime: + """Get datetime object from a specified expression.""" + if isinstance(expr, Arrow): + return expr.datetime + elif isinstance(expr, dt_datetime): + return expr + elif util.is_timestamp(expr): + timestamp = float(expr) + return cls.utcfromtimestamp(timestamp).datetime + else: + raise ValueError(f"{expr!r} not recognized as a datetime or timestamp.") + + @classmethod + def _get_frames(cls, name: _T_FRAMES) -> Tuple[str, str, int]: + """Finds relevant timeframe and steps for use in range and span methods. + + Returns a 3 element tuple in the form (frame, plural frame, step), for example ("day", "days", 1) + + """ + if name in cls._ATTRS: + return name, f"{name}s", 1 + elif name[-1] == "s" and name[:-1] in cls._ATTRS: + return name[:-1], name, 1 + elif name in ["week", "weeks"]: + return "week", "weeks", 1 + elif name in ["quarter", "quarters"]: + return "quarter", "months", 3 + else: + supported = ", ".join( + [ + "year(s)", + "month(s)", + "day(s)", + "hour(s)", + "minute(s)", + "second(s)", + "microsecond(s)", + "week(s)", + "quarter(s)", + ] + ) + raise ValueError( + f"Range or span over frame {name} not supported. Supported frames: {supported}." 
+ ) + + @classmethod + def _get_iteration_params(cls, end: Any, limit: Optional[int]) -> Tuple[Any, int]: + """Sets default end and limit values for range method.""" + if end is None: + + if limit is None: + raise ValueError("One of 'end' or 'limit' is required.") + + return cls.max, limit + + else: + if limit is None: + return end, sys.maxsize + return end, limit + + @staticmethod + def _is_last_day_of_month(date: "Arrow") -> bool: + """Returns a boolean indicating whether the datetime is the last day of the month.""" + return date.day == calendar.monthrange(date.year, date.month)[1] + + +Arrow.min = Arrow.fromdatetime(dt_datetime.min) +Arrow.max = Arrow.fromdatetime(dt_datetime.max) diff --git a/third_party/python/arrow/arrow/constants.py b/third_party/python/arrow/arrow/constants.py new file mode 100644 index 0000000000000..53d163b999e3c --- /dev/null +++ b/third_party/python/arrow/arrow/constants.py @@ -0,0 +1,177 @@ +"""Constants used internally in arrow.""" + +import sys +from datetime import datetime + +if sys.version_info < (3, 8): # pragma: no cover + from typing_extensions import Final +else: + from typing import Final # pragma: no cover + +# datetime.max.timestamp() errors on Windows, so we must hardcode +# the highest possible datetime value that can output a timestamp. +# tl;dr platform-independent max timestamps are hard to form +# See: https://stackoverflow.com/q/46133223 +try: + # Get max timestamp. 
try:
    # Get max timestamp. Works on POSIX-based systems like Linux and macOS,
    # but will trigger an OverflowError, ValueError, or OSError on Windows
    _MAX_TIMESTAMP = datetime.max.timestamp()
except (OverflowError, ValueError, OSError):  # pragma: no cover
    # Fallback for Windows and 32-bit systems if initial max timestamp call fails
    # Must get max value of ctime on Windows based on architecture (x32 vs x64)
    # https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/ctime-ctime32-ctime64-wctime-wctime32-wctime64
    # Note: this may occur on both 32-bit Linux systems (issue #930) along with Windows systems
    is_64bits = sys.maxsize > 2**32
    # Year 3000 fits in a 64-bit time_t; year 2038 is the 32-bit time_t limit.
    _MAX_TIMESTAMP = (
        datetime(3000, 1, 1, 23, 59, 59, 999999).timestamp()
        if is_64bits
        else datetime(2038, 1, 1, 23, 59, 59, 999999).timestamp()
    )

# Largest POSIX timestamp representable on this platform, plus the same
# limit expressed in milliseconds and microseconds (used by the parser to
# guess a timestamp's unit).
MAX_TIMESTAMP: Final[float] = _MAX_TIMESTAMP
MAX_TIMESTAMP_MS: Final[float] = MAX_TIMESTAMP * 1000
MAX_TIMESTAMP_US: Final[float] = MAX_TIMESTAMP * 1_000_000

# Bounds for datetime.toordinal() values.
MAX_ORDINAL: Final[int] = datetime.max.toordinal()
MIN_ORDINAL: Final[int] = 1

# Locale assumed when the caller does not specify one.
DEFAULT_LOCALE: Final[str] = "en-us"

# Supported dehumanize locales
DEHUMANIZE_LOCALES = {
    "en",
    "en-us",
    "en-gb",
    "en-au",
    "en-be",
    "en-jp",
    "en-za",
    "en-ca",
    "en-ph",
    "fr",
    "fr-fr",
    "fr-ca",
    "it",
    "it-it",
    "es",
    "es-es",
    "el",
    "el-gr",
    "ja",
    "ja-jp",
    "se",
    "se-fi",
    "se-no",
    "se-se",
    "sv",
    "sv-se",
    "fi",
    "fi-fi",
    "zh",
    "zh-cn",
    "zh-tw",
    "zh-hk",
    "nl",
    "nl-nl",
    "be",
    "be-by",
    "pl",
    "pl-pl",
    "ru",
    "ru-ru",
    "af",
    "bg",
    "bg-bg",
    "ua",
    "uk",
    "uk-ua",
    "mk",
    "mk-mk",
    "de",
    "de-de",
    "de-ch",
    "de-at",
    "nb",
    "nb-no",
    "nn",
    "nn-no",
    "pt",
    "pt-pt",
    "pt-br",
    "tl",
    "tl-ph",
    "vi",
    "vi-vn",
    "tr",
    "tr-tr",
    "az",
    "az-az",
    "da",
    "da-dk",
    "ml",
    "hi",
    "cs",
    "cs-cz",
    "sk",
    "sk-sk",
    "fa",
    "fa-ir",
    "mr",
    "ca",
    "ca-es",
    "ca-ad",
    "ca-fr",
    "ca-it",
    "eo",
    "eo-xx",
    "bn",
    "bn-bd",
    "bn-in",
    "rm",
    "rm-ch",
    "ro",
    "ro-ro",
    "sl",
    "sl-si",
    "id",
    "id-id",
    "ne",
    "ne-np",
    "ee",
    "et",
    "sw",
    "sw-ke",
    "sw-tz",
    "la",
    "la-va",
    "lt",
    "lt-lt",
    "ms",
    "ms-my",
    "ms-bn",
    "or",
    "or-in",
    "lb",
    "lb-lu",
    "zu",
    "zu-za",
    "sq",
    "sq-al",
    "ta",
    "ta-in",
    "ta-lk",
    "ur",
    "ur-pk",
    "ka",
    "ka-ge",
    "kk",
    "kk-kz",
    # "lo",
    # "lo-la",
    "am",
    "am-et",
    "hy-am",
    "hy",
    "uz",
    "uz-uz",
}
# pragma: no cover + + @overload + def get( + self, + __obj: Union[ + Arrow, + datetime, + date, + struct_time, + dt_tzinfo, + int, + float, + str, + Tuple[int, int, int], + ], + *, + locale: str = DEFAULT_LOCALE, + tzinfo: Optional[TZ_EXPR] = None, + normalize_whitespace: bool = False, + ) -> Arrow: + ... # pragma: no cover + + @overload + def get( + self, + __arg1: Union[datetime, date], + __arg2: TZ_EXPR, + *, + locale: str = DEFAULT_LOCALE, + tzinfo: Optional[TZ_EXPR] = None, + normalize_whitespace: bool = False, + ) -> Arrow: + ... # pragma: no cover + + @overload + def get( + self, + __arg1: str, + __arg2: Union[str, List[str]], + *, + locale: str = DEFAULT_LOCALE, + tzinfo: Optional[TZ_EXPR] = None, + normalize_whitespace: bool = False, + ) -> Arrow: + ... # pragma: no cover + + def get(self, *args: Any, **kwargs: Any) -> Arrow: + """Returns an :class:`Arrow ` object based on flexible inputs. + + :param locale: (optional) a ``str`` specifying a locale for the parser. Defaults to 'en-us'. + :param tzinfo: (optional) a :ref:`timezone expression ` or tzinfo object. + Replaces the timezone unless using an input form that is explicitly UTC or specifies + the timezone in a positional argument. Defaults to UTC. + :param normalize_whitespace: (optional) a ``bool`` specifying whether or not to normalize + redundant whitespace (spaces, tabs, and newlines) in a datetime string before parsing. + Defaults to false. + + Usage:: + + >>> import arrow + + **No inputs** to get current UTC time:: + + >>> arrow.get() + + + **One** :class:`Arrow ` object, to get a copy. 
+ + >>> arw = arrow.utcnow() + >>> arrow.get(arw) + + + **One** ``float`` or ``int``, convertible to a floating-point timestamp, to get + that timestamp in UTC:: + + >>> arrow.get(1367992474.293378) + + + >>> arrow.get(1367992474) + + + **One** ISO 8601-formatted ``str``, to parse it:: + + >>> arrow.get('2013-09-29T01:26:43.830580') + + + **One** ISO 8601-formatted ``str``, in basic format, to parse it:: + + >>> arrow.get('20160413T133656.456289') + + + **One** ``tzinfo``, to get the current time **converted** to that timezone:: + + >>> arrow.get(tz.tzlocal()) + + + **One** naive ``datetime``, to get that datetime in UTC:: + + >>> arrow.get(datetime(2013, 5, 5)) + + + **One** aware ``datetime``, to get that datetime:: + + >>> arrow.get(datetime(2013, 5, 5, tzinfo=tz.tzlocal())) + + + **One** naive ``date``, to get that date in UTC:: + + >>> arrow.get(date(2013, 5, 5)) + + + **One** time.struct time:: + + >>> arrow.get(gmtime(0)) + + + **One** iso calendar ``tuple``, to get that week date in UTC:: + + >>> arrow.get((2013, 18, 7)) + + + **Two** arguments, a naive or aware ``datetime``, and a replacement + :ref:`timezone expression `:: + + >>> arrow.get(datetime(2013, 5, 5), 'US/Pacific') + + + **Two** arguments, a naive ``date``, and a replacement + :ref:`timezone expression `:: + + >>> arrow.get(date(2013, 5, 5), 'US/Pacific') + + + **Two** arguments, both ``str``, to parse the first according to the format of the second:: + + >>> arrow.get('2013-05-05 12:30:45 America/Chicago', 'YYYY-MM-DD HH:mm:ss ZZZ') + + + **Two** arguments, first a ``str`` to parse and second a ``list`` of formats to try:: + + >>> arrow.get('2013-05-05 12:30:45', ['MM/DD/YYYY', 'YYYY-MM-DD HH:mm:ss']) + + + **Three or more** arguments, as for the direct constructor of an ``Arrow`` object:: + + >>> arrow.get(2013, 5, 5, 12, 30, 45) + + + """ + + arg_count = len(args) + locale = kwargs.pop("locale", DEFAULT_LOCALE) + tz = kwargs.get("tzinfo", None) + normalize_whitespace = 
kwargs.pop("normalize_whitespace", False) + + # if kwargs given, send to constructor unless only tzinfo provided + if len(kwargs) > 1: + arg_count = 3 + + # tzinfo kwarg is not provided + if len(kwargs) == 1 and tz is None: + arg_count = 3 + + # () -> now, @ tzinfo or utc + if arg_count == 0: + if isinstance(tz, str): + tz = parser.TzinfoParser.parse(tz) + return self.type.now(tzinfo=tz) + + if isinstance(tz, dt_tzinfo): + return self.type.now(tzinfo=tz) + + return self.type.utcnow() + + if arg_count == 1: + arg = args[0] + if isinstance(arg, Decimal): + arg = float(arg) + + # (None) -> raises an exception + if arg is None: + raise TypeError("Cannot parse argument of type None.") + + # try (int, float) -> from timestamp @ tzinfo + elif not isinstance(arg, str) and is_timestamp(arg): + if tz is None: + # set to UTC by default + tz = dateutil_tz.tzutc() + return self.type.fromtimestamp(arg, tzinfo=tz) + + # (Arrow) -> from the object's datetime @ tzinfo + elif isinstance(arg, Arrow): + return self.type.fromdatetime(arg.datetime, tzinfo=tz) + + # (datetime) -> from datetime @ tzinfo + elif isinstance(arg, datetime): + return self.type.fromdatetime(arg, tzinfo=tz) + + # (date) -> from date @ tzinfo + elif isinstance(arg, date): + return self.type.fromdate(arg, tzinfo=tz) + + # (tzinfo) -> now @ tzinfo + elif isinstance(arg, dt_tzinfo): + return self.type.now(tzinfo=arg) + + # (str) -> parse @ tzinfo + elif isinstance(arg, str): + dt = parser.DateTimeParser(locale).parse_iso(arg, normalize_whitespace) + return self.type.fromdatetime(dt, tzinfo=tz) + + # (struct_time) -> from struct_time + elif isinstance(arg, struct_time): + return self.type.utcfromtimestamp(calendar.timegm(arg)) + + # (iso calendar) -> convert then from date @ tzinfo + elif isinstance(arg, tuple) and len(arg) == 3: + d = iso_to_gregorian(*arg) + return self.type.fromdate(d, tzinfo=tz) + + else: + raise TypeError(f"Cannot parse single argument of type {type(arg)!r}.") + + elif arg_count == 2: + + arg_1, 
arg_2 = args[0], args[1] + + if isinstance(arg_1, datetime): + + # (datetime, tzinfo/str) -> fromdatetime @ tzinfo + if isinstance(arg_2, (dt_tzinfo, str)): + return self.type.fromdatetime(arg_1, tzinfo=arg_2) + else: + raise TypeError( + f"Cannot parse two arguments of types 'datetime', {type(arg_2)!r}." + ) + + elif isinstance(arg_1, date): + + # (date, tzinfo/str) -> fromdate @ tzinfo + if isinstance(arg_2, (dt_tzinfo, str)): + return self.type.fromdate(arg_1, tzinfo=arg_2) + else: + raise TypeError( + f"Cannot parse two arguments of types 'date', {type(arg_2)!r}." + ) + + # (str, format) -> parse @ tzinfo + elif isinstance(arg_1, str) and isinstance(arg_2, (str, list)): + dt = parser.DateTimeParser(locale).parse( + args[0], args[1], normalize_whitespace + ) + return self.type.fromdatetime(dt, tzinfo=tz) + + else: + raise TypeError( + f"Cannot parse two arguments of types {type(arg_1)!r} and {type(arg_2)!r}." + ) + + # 3+ args -> datetime-like via constructor + else: + return self.type(*args, **kwargs) + + def utcnow(self) -> Arrow: + """Returns an :class:`Arrow ` object, representing "now" in UTC time. + + Usage:: + + >>> import arrow + >>> arrow.utcnow() + + """ + + return self.type.utcnow() + + def now(self, tz: Optional[TZ_EXPR] = None) -> Arrow: + """Returns an :class:`Arrow ` object, representing "now" in the given + timezone. + + :param tz: (optional) A :ref:`timezone expression `. Defaults to local time. 
+ + Usage:: + + >>> import arrow + >>> arrow.now() + + + >>> arrow.now('US/Pacific') + + + >>> arrow.now('+02:00') + + + >>> arrow.now('local') + + """ + + if tz is None: + tz = dateutil_tz.tzlocal() + elif not isinstance(tz, dt_tzinfo): + tz = parser.TzinfoParser.parse(tz) + + return self.type.now(tz) diff --git a/third_party/python/arrow/arrow/formatter.py b/third_party/python/arrow/arrow/formatter.py new file mode 100644 index 0000000000000..728bea1aafc6d --- /dev/null +++ b/third_party/python/arrow/arrow/formatter.py @@ -0,0 +1,152 @@ +"""Provides the :class:`Arrow ` class, an improved formatter for datetimes.""" + +import re +import sys +from datetime import datetime, timedelta +from typing import Optional, Pattern, cast + +from dateutil import tz as dateutil_tz + +from arrow import locales +from arrow.constants import DEFAULT_LOCALE + +if sys.version_info < (3, 8): # pragma: no cover + from typing_extensions import Final +else: + from typing import Final # pragma: no cover + + +FORMAT_ATOM: Final[str] = "YYYY-MM-DD HH:mm:ssZZ" +FORMAT_COOKIE: Final[str] = "dddd, DD-MMM-YYYY HH:mm:ss ZZZ" +FORMAT_RFC822: Final[str] = "ddd, DD MMM YY HH:mm:ss Z" +FORMAT_RFC850: Final[str] = "dddd, DD-MMM-YY HH:mm:ss ZZZ" +FORMAT_RFC1036: Final[str] = "ddd, DD MMM YY HH:mm:ss Z" +FORMAT_RFC1123: Final[str] = "ddd, DD MMM YYYY HH:mm:ss Z" +FORMAT_RFC2822: Final[str] = "ddd, DD MMM YYYY HH:mm:ss Z" +FORMAT_RFC3339: Final[str] = "YYYY-MM-DD HH:mm:ssZZ" +FORMAT_RSS: Final[str] = "ddd, DD MMM YYYY HH:mm:ss Z" +FORMAT_W3C: Final[str] = "YYYY-MM-DD HH:mm:ssZZ" + + +class DateTimeFormatter: + + # This pattern matches characters enclosed in square brackets are matched as + # an atomic group. 
For more info on atomic groups and how to they are + # emulated in Python's re library, see https://stackoverflow.com/a/13577411/2701578 + + _FORMAT_RE: Final[Pattern[str]] = re.compile( + r"(\[(?:(?=(?P[^]]))(?P=literal))*\]|YYY?Y?|MM?M?M?|Do|DD?D?D?|d?dd?d?|HH?|hh?|mm?|ss?|SS?S?S?S?S?|ZZ?Z?|a|A|X|x|W)" + ) + + locale: locales.Locale + + def __init__(self, locale: str = DEFAULT_LOCALE) -> None: + + self.locale = locales.get_locale(locale) + + def format(cls, dt: datetime, fmt: str) -> str: + + # FIXME: _format_token() is nullable + return cls._FORMAT_RE.sub( + lambda m: cast(str, cls._format_token(dt, m.group(0))), fmt + ) + + def _format_token(self, dt: datetime, token: Optional[str]) -> Optional[str]: + + if token and token.startswith("[") and token.endswith("]"): + return token[1:-1] + + if token == "YYYY": + return self.locale.year_full(dt.year) + if token == "YY": + return self.locale.year_abbreviation(dt.year) + + if token == "MMMM": + return self.locale.month_name(dt.month) + if token == "MMM": + return self.locale.month_abbreviation(dt.month) + if token == "MM": + return f"{dt.month:02d}" + if token == "M": + return f"{dt.month}" + + if token == "DDDD": + return f"{dt.timetuple().tm_yday:03d}" + if token == "DDD": + return f"{dt.timetuple().tm_yday}" + if token == "DD": + return f"{dt.day:02d}" + if token == "D": + return f"{dt.day}" + + if token == "Do": + return self.locale.ordinal_number(dt.day) + + if token == "dddd": + return self.locale.day_name(dt.isoweekday()) + if token == "ddd": + return self.locale.day_abbreviation(dt.isoweekday()) + if token == "d": + return f"{dt.isoweekday()}" + + if token == "HH": + return f"{dt.hour:02d}" + if token == "H": + return f"{dt.hour}" + if token == "hh": + return f"{dt.hour if 0 < dt.hour < 13 else abs(dt.hour - 12):02d}" + if token == "h": + return f"{dt.hour if 0 < dt.hour < 13 else abs(dt.hour - 12)}" + + if token == "mm": + return f"{dt.minute:02d}" + if token == "m": + return f"{dt.minute}" + + if token == 
"ss": + return f"{dt.second:02d}" + if token == "s": + return f"{dt.second}" + + if token == "SSSSSS": + return f"{dt.microsecond:06d}" + if token == "SSSSS": + return f"{dt.microsecond // 10:05d}" + if token == "SSSS": + return f"{dt.microsecond // 100:04d}" + if token == "SSS": + return f"{dt.microsecond // 1000:03d}" + if token == "SS": + return f"{dt.microsecond // 10000:02d}" + if token == "S": + return f"{dt.microsecond // 100000}" + + if token == "X": + return f"{dt.timestamp()}" + + if token == "x": + return f"{dt.timestamp() * 1_000_000:.0f}" + + if token == "ZZZ": + return dt.tzname() + + if token in ["ZZ", "Z"]: + separator = ":" if token == "ZZ" else "" + tz = dateutil_tz.tzutc() if dt.tzinfo is None else dt.tzinfo + # `dt` must be aware object. Otherwise, this line will raise AttributeError + # https://github.com/arrow-py/arrow/pull/883#discussion_r529866834 + # datetime awareness: https://docs.python.org/3/library/datetime.html#aware-and-naive-objects + total_minutes = int(cast(timedelta, tz.utcoffset(dt)).total_seconds() / 60) + + sign = "+" if total_minutes >= 0 else "-" + total_minutes = abs(total_minutes) + hour, minute = divmod(total_minutes, 60) + + return f"{sign}{hour:02d}{separator}{minute:02d}" + + if token in ("a", "A"): + return self.locale.meridian(dt.hour, token) + + if token == "W": + year, week, day = dt.isocalendar() + return f"{year}-W{week:02d}-{day}" diff --git a/third_party/python/arrow/arrow/locales.py b/third_party/python/arrow/arrow/locales.py new file mode 100644 index 0000000000000..3627497f57b9c --- /dev/null +++ b/third_party/python/arrow/arrow/locales.py @@ -0,0 +1,6475 @@ +"""Provides internationalization for arrow in over 60 languages and dialects.""" + +import sys +from math import trunc +from typing import ( + Any, + ClassVar, + Dict, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +if sys.version_info < (3, 8): # pragma: no cover + from typing_extensions import Literal +else: + from 
typing import Literal # pragma: no cover + +TimeFrameLiteral = Literal[ + "now", + "second", + "seconds", + "minute", + "minutes", + "hour", + "hours", + "day", + "days", + "week", + "weeks", + "month", + "months", + "quarter", + "quarters", + "year", + "years", +] + +_TimeFrameElements = Union[ + str, Sequence[str], Mapping[str, str], Mapping[str, Sequence[str]] +] + +_locale_map: Dict[str, Type["Locale"]] = {} + + +def get_locale(name: str) -> "Locale": + """Returns an appropriate :class:`Locale ` + corresponding to an input locale name. + + :param name: the name of the locale. + + """ + + normalized_locale_name = name.lower().replace("_", "-") + locale_cls = _locale_map.get(normalized_locale_name) + + if locale_cls is None: + raise ValueError(f"Unsupported locale {normalized_locale_name!r}.") + + return locale_cls() + + +def get_locale_by_class_name(name: str) -> "Locale": + """Returns an appropriate :class:`Locale ` + corresponding to an locale class name. + + :param name: the name of the locale class. 
+ + """ + locale_cls: Optional[Type[Locale]] = globals().get(name) + + if locale_cls is None: + raise ValueError(f"Unsupported locale {name!r}.") + + return locale_cls() + + +class Locale: + """Represents locale-specific data and functionality.""" + + names: ClassVar[List[str]] = [] + + timeframes: ClassVar[Mapping[TimeFrameLiteral, _TimeFrameElements]] = { + "now": "", + "second": "", + "seconds": "", + "minute": "", + "minutes": "", + "hour": "", + "hours": "", + "day": "", + "days": "", + "week": "", + "weeks": "", + "month": "", + "months": "", + "quarter": "", + "quarters": "", + "year": "", + "years": "", + } + + meridians: ClassVar[Dict[str, str]] = {"am": "", "pm": "", "AM": "", "PM": ""} + + past: ClassVar[str] + future: ClassVar[str] + and_word: ClassVar[Optional[str]] = None + + month_names: ClassVar[List[str]] = [] + month_abbreviations: ClassVar[List[str]] = [] + + day_names: ClassVar[List[str]] = [] + day_abbreviations: ClassVar[List[str]] = [] + + ordinal_day_re: ClassVar[str] = r"(\d+)" + + _month_name_to_ordinal: Optional[Dict[str, int]] + + def __init_subclass__(cls, **kwargs: Any) -> None: + for locale_name in cls.names: + if locale_name in _locale_map: + raise LookupError(f"Duplicated locale name: {locale_name}") + + _locale_map[locale_name.lower().replace("_", "-")] = cls + + def __init__(self) -> None: + + self._month_name_to_ordinal = None + + def describe( + self, + timeframe: TimeFrameLiteral, + delta: Union[float, int] = 0, + only_distance: bool = False, + ) -> str: + """Describes a delta within a timeframe in plain language. + + :param timeframe: a string representing a timeframe. + :param delta: a quantity representing a delta in a timeframe. 
+ :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords + """ + + humanized = self._format_timeframe(timeframe, trunc(delta)) + if not only_distance: + humanized = self._format_relative(humanized, timeframe, delta) + + return humanized + + def describe_multi( + self, + timeframes: Sequence[Tuple[TimeFrameLiteral, Union[int, float]]], + only_distance: bool = False, + ) -> str: + """Describes a delta within multiple timeframes in plain language. + + :param timeframes: a list of string, quantity pairs each representing a timeframe and delta. + :param only_distance: return only distance eg: "2 hours and 11 seconds" without "in" or "ago" keywords + """ + + parts = [ + self._format_timeframe(timeframe, trunc(delta)) + for timeframe, delta in timeframes + ] + if self.and_word: + parts.insert(-1, self.and_word) + humanized = " ".join(parts) + + if not only_distance: + # Needed to determine the correct relative string to use + timeframe_value = 0 + + for _unit_name, unit_value in timeframes: + if trunc(unit_value) != 0: + timeframe_value = trunc(unit_value) + break + + # Note it doesn't matter the timeframe unit we use on the call, only the value + humanized = self._format_relative(humanized, "seconds", timeframe_value) + + return humanized + + def day_name(self, day: int) -> str: + """Returns the day name for a specified day of the week. + + :param day: the ``int`` day of the week (1-7). + + """ + + return self.day_names[day] + + def day_abbreviation(self, day: int) -> str: + """Returns the day abbreviation for a specified day of the week. + + :param day: the ``int`` day of the week (1-7). + + """ + + return self.day_abbreviations[day] + + def month_name(self, month: int) -> str: + """Returns the month name for a specified month of the year. + + :param month: the ``int`` month of the year (1-12). 
+ + """ + + return self.month_names[month] + + def month_abbreviation(self, month: int) -> str: + """Returns the month abbreviation for a specified month of the year. + + :param month: the ``int`` month of the year (1-12). + + """ + + return self.month_abbreviations[month] + + def month_number(self, name: str) -> Optional[int]: + """Returns the month number for a month specified by name or abbreviation. + + :param name: the month name or abbreviation. + + """ + + if self._month_name_to_ordinal is None: + self._month_name_to_ordinal = self._name_to_ordinal(self.month_names) + self._month_name_to_ordinal.update( + self._name_to_ordinal(self.month_abbreviations) + ) + + return self._month_name_to_ordinal.get(name) + + def year_full(self, year: int) -> str: + """Returns the year for specific locale if available + + :param year: the ``int`` year (4-digit) + """ + return f"{year:04d}" + + def year_abbreviation(self, year: int) -> str: + """Returns the year for specific locale if available + + :param year: the ``int`` year (4-digit) + """ + return f"{year:04d}"[2:] + + def meridian(self, hour: int, token: Any) -> Optional[str]: + """Returns the meridian indicator for a specified hour and format token. + + :param hour: the ``int`` hour of the day. + :param token: the format token. 
+ """ + + if token == "a": + return self.meridians["am"] if hour < 12 else self.meridians["pm"] + if token == "A": + return self.meridians["AM"] if hour < 12 else self.meridians["PM"] + return None + + def ordinal_number(self, n: int) -> str: + """Returns the ordinal format of a given integer + + :param n: an integer + """ + return self._ordinal_number(n) + + def _ordinal_number(self, n: int) -> str: + return f"{n}" + + def _name_to_ordinal(self, lst: Sequence[str]) -> Dict[str, int]: + return {elem.lower(): i for i, elem in enumerate(lst[1:], 1)} + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + # TODO: remove cast + return cast(str, self.timeframes[timeframe]).format(trunc(abs(delta))) + + def _format_relative( + self, + humanized: str, + timeframe: TimeFrameLiteral, + delta: Union[float, int], + ) -> str: + + if timeframe == "now": + return humanized + + direction = self.past if delta < 0 else self.future + + return direction.format(humanized) + + +class EnglishLocale(Locale): + names = [ + "en", + "en-us", + "en-gb", + "en-au", + "en-be", + "en-jp", + "en-za", + "en-ca", + "en-ph", + ] + + past = "{0} ago" + future = "in {0}" + and_word = "and" + + timeframes = { + "now": "just now", + "second": "a second", + "seconds": "{0} seconds", + "minute": "a minute", + "minutes": "{0} minutes", + "hour": "an hour", + "hours": "{0} hours", + "day": "a day", + "days": "{0} days", + "week": "a week", + "weeks": "{0} weeks", + "month": "a month", + "months": "{0} months", + "quarter": "a quarter", + "quarters": "{0} quarters", + "year": "a year", + "years": "{0} years", + } + + meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"} + + month_names = [ + "", + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + ] + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + 
"Dec", + ] + + day_names = [ + "", + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", + ] + day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] + + ordinal_day_re = r"((?P[2-3]?1(?=st)|[2-3]?2(?=nd)|[2-3]?3(?=rd)|[1-3]?[04-9](?=th)|1[1-3](?=th))(st|nd|rd|th))" + + def _ordinal_number(self, n: int) -> str: + if n % 100 not in (11, 12, 13): + remainder = abs(n) % 10 + if remainder == 1: + return f"{n}st" + elif remainder == 2: + return f"{n}nd" + elif remainder == 3: + return f"{n}rd" + return f"{n}th" + + def describe( + self, + timeframe: TimeFrameLiteral, + delta: Union[int, float] = 0, + only_distance: bool = False, + ) -> str: + """Describes a delta within a timeframe in plain language. + + :param timeframe: a string representing a timeframe. + :param delta: a quantity representing a delta in a timeframe. + :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords + """ + + humanized = super().describe(timeframe, delta, only_distance) + if only_distance and timeframe == "now": + humanized = "instantly" + + return humanized + + +class ItalianLocale(Locale): + names = ["it", "it-it"] + past = "{0} fa" + future = "tra {0}" + and_word = "e" + + timeframes = { + "now": "adesso", + "second": "un secondo", + "seconds": "{0} qualche secondo", + "minute": "un minuto", + "minutes": "{0} minuti", + "hour": "un'ora", + "hours": "{0} ore", + "day": "un giorno", + "days": "{0} giorni", + "week": "una settimana,", + "weeks": "{0} settimane", + "month": "un mese", + "months": "{0} mesi", + "year": "un anno", + "years": "{0} anni", + } + + month_names = [ + "", + "gennaio", + "febbraio", + "marzo", + "aprile", + "maggio", + "giugno", + "luglio", + "agosto", + "settembre", + "ottobre", + "novembre", + "dicembre", + ] + month_abbreviations = [ + "", + "gen", + "feb", + "mar", + "apr", + "mag", + "giu", + "lug", + "ago", + "set", + "ott", + "nov", + "dic", + ] + + day_names = [ + "", + 
"lunedì", + "martedì", + "mercoledì", + "giovedì", + "venerdì", + "sabato", + "domenica", + ] + day_abbreviations = ["", "lun", "mar", "mer", "gio", "ven", "sab", "dom"] + + ordinal_day_re = r"((?P[1-3]?[0-9](?=[ºª]))[ºª])" + + def _ordinal_number(self, n: int) -> str: + return f"{n}º" + + +class SpanishLocale(Locale): + names = ["es", "es-es"] + past = "hace {0}" + future = "en {0}" + and_word = "y" + + timeframes = { + "now": "ahora", + "second": "un segundo", + "seconds": "{0} segundos", + "minute": "un minuto", + "minutes": "{0} minutos", + "hour": "una hora", + "hours": "{0} horas", + "day": "un día", + "days": "{0} días", + "week": "una semana", + "weeks": "{0} semanas", + "month": "un mes", + "months": "{0} meses", + "year": "un año", + "years": "{0} años", + } + + meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"} + + month_names = [ + "", + "enero", + "febrero", + "marzo", + "abril", + "mayo", + "junio", + "julio", + "agosto", + "septiembre", + "octubre", + "noviembre", + "diciembre", + ] + month_abbreviations = [ + "", + "ene", + "feb", + "mar", + "abr", + "may", + "jun", + "jul", + "ago", + "sep", + "oct", + "nov", + "dic", + ] + + day_names = [ + "", + "lunes", + "martes", + "miércoles", + "jueves", + "viernes", + "sábado", + "domingo", + ] + day_abbreviations = ["", "lun", "mar", "mie", "jue", "vie", "sab", "dom"] + + ordinal_day_re = r"((?P[1-3]?[0-9](?=[ºª]))[ºª])" + + def _ordinal_number(self, n: int) -> str: + return f"{n}º" + + +class FrenchBaseLocale(Locale): + past = "il y a {0}" + future = "dans {0}" + and_word = "et" + + timeframes = { + "now": "maintenant", + "second": "une seconde", + "seconds": "{0} secondes", + "minute": "une minute", + "minutes": "{0} minutes", + "hour": "une heure", + "hours": "{0} heures", + "day": "un jour", + "days": "{0} jours", + "week": "une semaine", + "weeks": "{0} semaines", + "month": "un mois", + "months": "{0} mois", + "year": "un an", + "years": "{0} ans", + } + + month_names = [ + "", + "janvier", 
+ "février", + "mars", + "avril", + "mai", + "juin", + "juillet", + "août", + "septembre", + "octobre", + "novembre", + "décembre", + ] + + day_names = [ + "", + "lundi", + "mardi", + "mercredi", + "jeudi", + "vendredi", + "samedi", + "dimanche", + ] + day_abbreviations = ["", "lun", "mar", "mer", "jeu", "ven", "sam", "dim"] + + ordinal_day_re = ( + r"((?P\b1(?=er\b)|[1-3]?[02-9](?=e\b)|[1-3]1(?=e\b))(er|e)\b)" + ) + + def _ordinal_number(self, n: int) -> str: + if abs(n) == 1: + return f"{n}er" + return f"{n}e" + + +class FrenchLocale(FrenchBaseLocale, Locale): + names = ["fr", "fr-fr"] + + month_abbreviations = [ + "", + "janv", + "févr", + "mars", + "avr", + "mai", + "juin", + "juil", + "août", + "sept", + "oct", + "nov", + "déc", + ] + + +class FrenchCanadianLocale(FrenchBaseLocale, Locale): + names = ["fr-ca"] + + month_abbreviations = [ + "", + "janv", + "févr", + "mars", + "avr", + "mai", + "juin", + "juill", + "août", + "sept", + "oct", + "nov", + "déc", + ] + + +class GreekLocale(Locale): + names = ["el", "el-gr"] + + past = "{0} πριν" + future = "σε {0}" + and_word = "και" + + timeframes = { + "now": "τώρα", + "second": "ένα δεύτερο", + "seconds": "{0} δευτερόλεπτα", + "minute": "ένα λεπτό", + "minutes": "{0} λεπτά", + "hour": "μία ώρα", + "hours": "{0} ώρες", + "day": "μία μέρα", + "days": "{0} μέρες", + "week": "μία εβδομάδα", + "weeks": "{0} εβδομάδες", + "month": "ένα μήνα", + "months": "{0} μήνες", + "year": "ένα χρόνο", + "years": "{0} χρόνια", + } + + month_names = [ + "", + "Ιανουαρίου", + "Φεβρουαρίου", + "Μαρτίου", + "Απριλίου", + "Μαΐου", + "Ιουνίου", + "Ιουλίου", + "Αυγούστου", + "Σεπτεμβρίου", + "Οκτωβρίου", + "Νοεμβρίου", + "Δεκεμβρίου", + ] + month_abbreviations = [ + "", + "Ιαν", + "Φεβ", + "Μαρ", + "Απρ", + "Μαϊ", + "Ιον", + "Ιολ", + "Αυγ", + "Σεπ", + "Οκτ", + "Νοε", + "Δεκ", + ] + + day_names = [ + "", + "Δευτέρα", + "Τρίτη", + "Τετάρτη", + "Πέμπτη", + "Παρασκευή", + "Σάββατο", + "Κυριακή", + ] + day_abbreviations = ["", "Δευ", "Τρι", 
"Τετ", "Πεμ", "Παρ", "Σαβ", "Κυρ"] + + +class JapaneseLocale(Locale): + names = ["ja", "ja-jp"] + + past = "{0}前" + future = "{0}後" + and_word = "" + + timeframes = { + "now": "現在", + "second": "1秒", + "seconds": "{0}秒", + "minute": "1分", + "minutes": "{0}分", + "hour": "1時間", + "hours": "{0}時間", + "day": "1日", + "days": "{0}日", + "week": "1週間", + "weeks": "{0}週間", + "month": "1ヶ月", + "months": "{0}ヶ月", + "year": "1年", + "years": "{0}年", + } + + month_names = [ + "", + "1月", + "2月", + "3月", + "4月", + "5月", + "6月", + "7月", + "8月", + "9月", + "10月", + "11月", + "12月", + ] + month_abbreviations = [ + "", + " 1", + " 2", + " 3", + " 4", + " 5", + " 6", + " 7", + " 8", + " 9", + "10", + "11", + "12", + ] + + day_names = ["", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日", "日曜日"] + day_abbreviations = ["", "月", "火", "水", "木", "金", "土", "日"] + + +class SwedishLocale(Locale): + names = ["sv", "sv-se"] + + past = "för {0} sen" + future = "om {0}" + and_word = "och" + + timeframes = { + "now": "just nu", + "second": "en sekund", + "seconds": "{0} sekunder", + "minute": "en minut", + "minutes": "{0} minuter", + "hour": "en timme", + "hours": "{0} timmar", + "day": "en dag", + "days": "{0} dagar", + "week": "en vecka", + "weeks": "{0} veckor", + "month": "en månad", + "months": "{0} månader", + "year": "ett år", + "years": "{0} år", + } + + month_names = [ + "", + "januari", + "februari", + "mars", + "april", + "maj", + "juni", + "juli", + "augusti", + "september", + "oktober", + "november", + "december", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "maj", + "jun", + "jul", + "aug", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "måndag", + "tisdag", + "onsdag", + "torsdag", + "fredag", + "lördag", + "söndag", + ] + day_abbreviations = ["", "mån", "tis", "ons", "tor", "fre", "lör", "sön"] + + +class FinnishLocale(Locale): + names = ["fi", "fi-fi"] + + # The finnish grammar is very complex, and its hard to convert + # 1-to-1 to something 
like English. + + past = "{0} sitten" + future = "{0} kuluttua" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "juuri nyt", + "second": "sekunti", + "seconds": {"past": "{0} muutama sekunti", "future": "{0} muutaman sekunnin"}, + "minute": {"past": "minuutti", "future": "minuutin"}, + "minutes": {"past": "{0} minuuttia", "future": "{0} minuutin"}, + "hour": {"past": "tunti", "future": "tunnin"}, + "hours": {"past": "{0} tuntia", "future": "{0} tunnin"}, + "day": "päivä", + "days": {"past": "{0} päivää", "future": "{0} päivän"}, + "month": {"past": "kuukausi", "future": "kuukauden"}, + "months": {"past": "{0} kuukautta", "future": "{0} kuukauden"}, + "year": {"past": "vuosi", "future": "vuoden"}, + "years": {"past": "{0} vuotta", "future": "{0} vuoden"}, + } + + # Months and days are lowercase in Finnish + month_names = [ + "", + "tammikuu", + "helmikuu", + "maaliskuu", + "huhtikuu", + "toukokuu", + "kesäkuu", + "heinäkuu", + "elokuu", + "syyskuu", + "lokakuu", + "marraskuu", + "joulukuu", + ] + + month_abbreviations = [ + "", + "tammi", + "helmi", + "maalis", + "huhti", + "touko", + "kesä", + "heinä", + "elo", + "syys", + "loka", + "marras", + "joulu", + ] + + day_names = [ + "", + "maanantai", + "tiistai", + "keskiviikko", + "torstai", + "perjantai", + "lauantai", + "sunnuntai", + ] + + day_abbreviations = ["", "ma", "ti", "ke", "to", "pe", "la", "su"] + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + form = self.timeframes[timeframe] + + if isinstance(form, Mapping): + if delta < 0: + form = form["past"] + else: + form = form["future"] + + return form.format(abs(delta)) + + def _ordinal_number(self, n: int) -> str: + return f"{n}." 
+ + +class ChineseCNLocale(Locale): + names = ["zh", "zh-cn"] + + past = "{0}前" + future = "{0}后" + + timeframes = { + "now": "刚才", + "second": "1秒", + "seconds": "{0}秒", + "minute": "1分钟", + "minutes": "{0}分钟", + "hour": "1小时", + "hours": "{0}小时", + "day": "1天", + "days": "{0}天", + "week": "1周", + "weeks": "{0}周", + "month": "1个月", + "months": "{0}个月", + "year": "1年", + "years": "{0}年", + } + + month_names = [ + "", + "一月", + "二月", + "三月", + "四月", + "五月", + "六月", + "七月", + "八月", + "九月", + "十月", + "十一月", + "十二月", + ] + month_abbreviations = [ + "", + " 1", + " 2", + " 3", + " 4", + " 5", + " 6", + " 7", + " 8", + " 9", + "10", + "11", + "12", + ] + + day_names = ["", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"] + day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"] + + +class ChineseTWLocale(Locale): + names = ["zh-tw"] + + past = "{0}前" + future = "{0}後" + and_word = "和" + + timeframes = { + "now": "剛才", + "second": "1秒", + "seconds": "{0}秒", + "minute": "1分鐘", + "minutes": "{0}分鐘", + "hour": "1小時", + "hours": "{0}小時", + "day": "1天", + "days": "{0}天", + "week": "1週", + "weeks": "{0}週", + "month": "1個月", + "months": "{0}個月", + "year": "1年", + "years": "{0}年", + } + + month_names = [ + "", + "1月", + "2月", + "3月", + "4月", + "5月", + "6月", + "7月", + "8月", + "9月", + "10月", + "11月", + "12月", + ] + month_abbreviations = [ + "", + " 1", + " 2", + " 3", + " 4", + " 5", + " 6", + " 7", + " 8", + " 9", + "10", + "11", + "12", + ] + + day_names = ["", "週一", "週二", "週三", "週四", "週五", "週六", "週日"] + day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"] + + +class HongKongLocale(Locale): + names = ["zh-hk"] + + past = "{0}前" + future = "{0}後" + + timeframes = { + "now": "剛才", + "second": "1秒", + "seconds": "{0}秒", + "minute": "1分鐘", + "minutes": "{0}分鐘", + "hour": "1小時", + "hours": "{0}小時", + "day": "1天", + "days": "{0}天", + "week": "1星期", + "weeks": "{0}星期", + "month": "1個月", + "months": "{0}個月", + "year": "1年", + "years": "{0}年", + } + + month_names = [ + "", + 
"1月", + "2月", + "3月", + "4月", + "5月", + "6月", + "7月", + "8月", + "9月", + "10月", + "11月", + "12月", + ] + month_abbreviations = [ + "", + " 1", + " 2", + " 3", + " 4", + " 5", + " 6", + " 7", + " 8", + " 9", + "10", + "11", + "12", + ] + + day_names = ["", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"] + day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"] + + +class KoreanLocale(Locale): + names = ["ko", "ko-kr"] + + past = "{0} 전" + future = "{0} 후" + + timeframes = { + "now": "지금", + "second": "1초", + "seconds": "{0}초", + "minute": "1분", + "minutes": "{0}분", + "hour": "한시간", + "hours": "{0}시간", + "day": "하루", + "days": "{0}일", + "week": "1주", + "weeks": "{0}주", + "month": "한달", + "months": "{0}개월", + "year": "1년", + "years": "{0}년", + } + + special_dayframes = { + -3: "그끄제", + -2: "그제", + -1: "어제", + 1: "내일", + 2: "모레", + 3: "글피", + 4: "그글피", + } + + special_yearframes = {-2: "제작년", -1: "작년", 1: "내년", 2: "내후년"} + + month_names = [ + "", + "1월", + "2월", + "3월", + "4월", + "5월", + "6월", + "7월", + "8월", + "9월", + "10월", + "11월", + "12월", + ] + month_abbreviations = [ + "", + " 1", + " 2", + " 3", + " 4", + " 5", + " 6", + " 7", + " 8", + " 9", + "10", + "11", + "12", + ] + + day_names = ["", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일", "일요일"] + day_abbreviations = ["", "월", "화", "수", "목", "금", "토", "일"] + + def _ordinal_number(self, n: int) -> str: + ordinals = ["0", "첫", "두", "세", "네", "다섯", "여섯", "일곱", "여덟", "아홉", "열"] + if n < len(ordinals): + return f"{ordinals[n]}번째" + return f"{n}번째" + + def _format_relative( + self, + humanized: str, + timeframe: TimeFrameLiteral, + delta: Union[float, int], + ) -> str: + if timeframe in ("day", "days"): + special = self.special_dayframes.get(int(delta)) + if special: + return special + elif timeframe in ("year", "years"): + special = self.special_yearframes.get(int(delta)) + if special: + return special + + return super()._format_relative(humanized, timeframe, delta) + + +# derived locale types & implementations. 
+class DutchLocale(Locale): + names = ["nl", "nl-nl"] + + past = "{0} geleden" + future = "over {0}" + + timeframes = { + "now": "nu", + "second": "een seconde", + "seconds": "{0} seconden", + "minute": "een minuut", + "minutes": "{0} minuten", + "hour": "een uur", + "hours": "{0} uur", + "day": "een dag", + "days": "{0} dagen", + "week": "een week", + "weeks": "{0} weken", + "month": "een maand", + "months": "{0} maanden", + "year": "een jaar", + "years": "{0} jaar", + } + + # In Dutch names of months and days are not starting with a capital letter + # like in the English language. + month_names = [ + "", + "januari", + "februari", + "maart", + "april", + "mei", + "juni", + "juli", + "augustus", + "september", + "oktober", + "november", + "december", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mrt", + "apr", + "mei", + "jun", + "jul", + "aug", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "maandag", + "dinsdag", + "woensdag", + "donderdag", + "vrijdag", + "zaterdag", + "zondag", + ] + day_abbreviations = ["", "ma", "di", "wo", "do", "vr", "za", "zo"] + + +class SlavicBaseLocale(Locale): + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + form = self.timeframes[timeframe] + delta = abs(delta) + + if isinstance(form, Mapping): + if delta % 10 == 1 and delta % 100 != 11: + form = form["singular"] + elif 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20): + form = form["dual"] + else: + form = form["plural"] + + return form.format(delta) + + +class BelarusianLocale(SlavicBaseLocale): + names = ["be", "be-by"] + + past = "{0} таму" + future = "праз {0}" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "зараз", + "second": "секунду", + "seconds": "{0} некалькі секунд", + "minute": "хвіліну", + "minutes": { + "singular": "{0} хвіліну", + "dual": "{0} хвіліны", + 
"plural": "{0} хвілін", + }, + "hour": "гадзіну", + "hours": { + "singular": "{0} гадзіну", + "dual": "{0} гадзіны", + "plural": "{0} гадзін", + }, + "day": "дзень", + "days": {"singular": "{0} дзень", "dual": "{0} дні", "plural": "{0} дзён"}, + "month": "месяц", + "months": { + "singular": "{0} месяц", + "dual": "{0} месяцы", + "plural": "{0} месяцаў", + }, + "year": "год", + "years": {"singular": "{0} год", "dual": "{0} гады", "plural": "{0} гадоў"}, + } + + month_names = [ + "", + "студзеня", + "лютага", + "сакавіка", + "красавіка", + "траўня", + "чэрвеня", + "ліпеня", + "жніўня", + "верасня", + "кастрычніка", + "лістапада", + "снежня", + ] + month_abbreviations = [ + "", + "студ", + "лют", + "сак", + "крас", + "трав", + "чэрв", + "ліп", + "жнів", + "вер", + "каст", + "ліст", + "снеж", + ] + + day_names = [ + "", + "панядзелак", + "аўторак", + "серада", + "чацвер", + "пятніца", + "субота", + "нядзеля", + ] + day_abbreviations = ["", "пн", "ат", "ср", "чц", "пт", "сб", "нд"] + + +class PolishLocale(SlavicBaseLocale): + names = ["pl", "pl-pl"] + + past = "{0} temu" + future = "za {0}" + + # The nouns should be in genitive case (Polish: "dopełniacz") + # in order to correctly form `past` & `future` expressions. 
+ timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "teraz", + "second": "sekundę", + "seconds": { + "singular": "{0} sekund", + "dual": "{0} sekundy", + "plural": "{0} sekund", + }, + "minute": "minutę", + "minutes": { + "singular": "{0} minut", + "dual": "{0} minuty", + "plural": "{0} minut", + }, + "hour": "godzinę", + "hours": { + "singular": "{0} godzin", + "dual": "{0} godziny", + "plural": "{0} godzin", + }, + "day": "dzień", + "days": "{0} dni", + "week": "tydzień", + "weeks": { + "singular": "{0} tygodni", + "dual": "{0} tygodnie", + "plural": "{0} tygodni", + }, + "month": "miesiąc", + "months": { + "singular": "{0} miesięcy", + "dual": "{0} miesiące", + "plural": "{0} miesięcy", + }, + "year": "rok", + "years": {"singular": "{0} lat", "dual": "{0} lata", "plural": "{0} lat"}, + } + + month_names = [ + "", + "styczeń", + "luty", + "marzec", + "kwiecień", + "maj", + "czerwiec", + "lipiec", + "sierpień", + "wrzesień", + "październik", + "listopad", + "grudzień", + ] + month_abbreviations = [ + "", + "sty", + "lut", + "mar", + "kwi", + "maj", + "cze", + "lip", + "sie", + "wrz", + "paź", + "lis", + "gru", + ] + + day_names = [ + "", + "poniedziałek", + "wtorek", + "środa", + "czwartek", + "piątek", + "sobota", + "niedziela", + ] + day_abbreviations = ["", "Pn", "Wt", "Śr", "Czw", "Pt", "So", "Nd"] + + +class RussianLocale(SlavicBaseLocale): + names = ["ru", "ru-ru"] + + past = "{0} назад" + future = "через {0}" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "сейчас", + "second": "секунда", + "seconds": { + "singular": "{0} секунду", + "dual": "{0} секунды", + "plural": "{0} секунд", + }, + "minute": "минуту", + "minutes": { + "singular": "{0} минуту", + "dual": "{0} минуты", + "plural": "{0} минут", + }, + "hour": "час", + "hours": {"singular": "{0} час", "dual": "{0} часа", "plural": "{0} часов"}, + "day": "день", + "days": {"singular": "{0} день", "dual": "{0} дня", 
"plural": "{0} дней"}, + "week": "неделю", + "weeks": { + "singular": "{0} неделю", + "dual": "{0} недели", + "plural": "{0} недель", + }, + "month": "месяц", + "months": { + "singular": "{0} месяц", + "dual": "{0} месяца", + "plural": "{0} месяцев", + }, + "quarter": "квартал", + "quarters": { + "singular": "{0} квартал", + "dual": "{0} квартала", + "plural": "{0} кварталов", + }, + "year": "год", + "years": {"singular": "{0} год", "dual": "{0} года", "plural": "{0} лет"}, + } + + month_names = [ + "", + "января", + "февраля", + "марта", + "апреля", + "мая", + "июня", + "июля", + "августа", + "сентября", + "октября", + "ноября", + "декабря", + ] + month_abbreviations = [ + "", + "янв", + "фев", + "мар", + "апр", + "май", + "июн", + "июл", + "авг", + "сен", + "окт", + "ноя", + "дек", + ] + + day_names = [ + "", + "понедельник", + "вторник", + "среда", + "четверг", + "пятница", + "суббота", + "воскресенье", + ] + day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "вс"] + + +class AfrikaansLocale(Locale): + names = ["af", "af-nl"] + + past = "{0} gelede" + future = "in {0}" + + timeframes = { + "now": "nou", + "second": "n sekonde", + "seconds": "{0} sekondes", + "minute": "minuut", + "minutes": "{0} minute", + "hour": "uur", + "hours": "{0} ure", + "day": "een dag", + "days": "{0} dae", + "month": "een maand", + "months": "{0} maande", + "year": "een jaar", + "years": "{0} jaar", + } + + month_names = [ + "", + "Januarie", + "Februarie", + "Maart", + "April", + "Mei", + "Junie", + "Julie", + "Augustus", + "September", + "Oktober", + "November", + "Desember", + ] + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mrt", + "Apr", + "Mei", + "Jun", + "Jul", + "Aug", + "Sep", + "Okt", + "Nov", + "Des", + ] + + day_names = [ + "", + "Maandag", + "Dinsdag", + "Woensdag", + "Donderdag", + "Vrydag", + "Saterdag", + "Sondag", + ] + day_abbreviations = ["", "Ma", "Di", "Wo", "Do", "Vr", "Za", "So"] + + +class BulgarianLocale(SlavicBaseLocale): + names = ["bg", 
"bg-bg"] + + past = "{0} назад" + future = "напред {0}" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "сега", + "second": "секунда", + "seconds": "{0} няколко секунди", + "minute": "минута", + "minutes": { + "singular": "{0} минута", + "dual": "{0} минути", + "plural": "{0} минути", + }, + "hour": "час", + "hours": {"singular": "{0} час", "dual": "{0} часа", "plural": "{0} часа"}, + "day": "ден", + "days": {"singular": "{0} ден", "dual": "{0} дни", "plural": "{0} дни"}, + "month": "месец", + "months": { + "singular": "{0} месец", + "dual": "{0} месеца", + "plural": "{0} месеца", + }, + "year": "година", + "years": { + "singular": "{0} година", + "dual": "{0} години", + "plural": "{0} години", + }, + } + + month_names = [ + "", + "януари", + "февруари", + "март", + "април", + "май", + "юни", + "юли", + "август", + "септември", + "октомври", + "ноември", + "декември", + ] + month_abbreviations = [ + "", + "ян", + "февр", + "март", + "апр", + "май", + "юни", + "юли", + "авг", + "септ", + "окт", + "ноем", + "дек", + ] + + day_names = [ + "", + "понеделник", + "вторник", + "сряда", + "четвъртък", + "петък", + "събота", + "неделя", + ] + day_abbreviations = ["", "пон", "вт", "ср", "четв", "пет", "съб", "нед"] + + +class UkrainianLocale(SlavicBaseLocale): + names = ["ua", "uk", "uk-ua"] + + past = "{0} тому" + future = "за {0}" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "зараз", + "second": "секунда", + "seconds": "{0} кілька секунд", + "minute": "хвилину", + "minutes": { + "singular": "{0} хвилину", + "dual": "{0} хвилини", + "plural": "{0} хвилин", + }, + "hour": "годину", + "hours": { + "singular": "{0} годину", + "dual": "{0} години", + "plural": "{0} годин", + }, + "day": "день", + "days": {"singular": "{0} день", "dual": "{0} дні", "plural": "{0} днів"}, + "month": "місяць", + "months": { + "singular": "{0} місяць", + "dual": "{0} місяці", + "plural": "{0} 
місяців", + }, + "year": "рік", + "years": {"singular": "{0} рік", "dual": "{0} роки", "plural": "{0} років"}, + } + + month_names = [ + "", + "січня", + "лютого", + "березня", + "квітня", + "травня", + "червня", + "липня", + "серпня", + "вересня", + "жовтня", + "листопада", + "грудня", + ] + month_abbreviations = [ + "", + "січ", + "лют", + "бер", + "квіт", + "трав", + "черв", + "лип", + "серп", + "вер", + "жовт", + "лист", + "груд", + ] + + day_names = [ + "", + "понеділок", + "вівторок", + "середа", + "четвер", + "п’ятниця", + "субота", + "неділя", + ] + day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "нд"] + + +class MacedonianLocale(SlavicBaseLocale): + names = ["mk", "mk-mk"] + + past = "пред {0}" + future = "за {0}" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "сега", + "second": "една секунда", + "seconds": { + "singular": "{0} секунда", + "dual": "{0} секунди", + "plural": "{0} секунди", + }, + "minute": "една минута", + "minutes": { + "singular": "{0} минута", + "dual": "{0} минути", + "plural": "{0} минути", + }, + "hour": "еден саат", + "hours": {"singular": "{0} саат", "dual": "{0} саати", "plural": "{0} саати"}, + "day": "еден ден", + "days": {"singular": "{0} ден", "dual": "{0} дена", "plural": "{0} дена"}, + "week": "една недела", + "weeks": { + "singular": "{0} недела", + "dual": "{0} недели", + "plural": "{0} недели", + }, + "month": "еден месец", + "months": { + "singular": "{0} месец", + "dual": "{0} месеци", + "plural": "{0} месеци", + }, + "year": "една година", + "years": { + "singular": "{0} година", + "dual": "{0} години", + "plural": "{0} години", + }, + } + + meridians = {"am": "дп", "pm": "пп", "AM": "претпладне", "PM": "попладне"} + + month_names = [ + "", + "Јануари", + "Февруари", + "Март", + "Април", + "Мај", + "Јуни", + "Јули", + "Август", + "Септември", + "Октомври", + "Ноември", + "Декември", + ] + month_abbreviations = [ + "", + "Јан", + "Фев", + "Мар", + "Апр", + 
"Мај", + "Јун", + "Јул", + "Авг", + "Септ", + "Окт", + "Ноем", + "Декем", + ] + + day_names = [ + "", + "Понеделник", + "Вторник", + "Среда", + "Четврток", + "Петок", + "Сабота", + "Недела", + ] + day_abbreviations = [ + "", + "Пон", + "Вт", + "Сре", + "Чет", + "Пет", + "Саб", + "Нед", + ] + + +class GermanBaseLocale(Locale): + past = "vor {0}" + future = "in {0}" + and_word = "und" + + timeframes = { + "now": "gerade eben", + "second": "einer Sekunde", + "seconds": "{0} Sekunden", + "minute": "einer Minute", + "minutes": "{0} Minuten", + "hour": "einer Stunde", + "hours": "{0} Stunden", + "day": "einem Tag", + "days": "{0} Tagen", + "week": "einer Woche", + "weeks": "{0} Wochen", + "month": "einem Monat", + "months": "{0} Monaten", + "year": "einem Jahr", + "years": "{0} Jahren", + } + + timeframes_only_distance = timeframes.copy() + timeframes_only_distance["second"] = "eine Sekunde" + timeframes_only_distance["minute"] = "eine Minute" + timeframes_only_distance["hour"] = "eine Stunde" + timeframes_only_distance["day"] = "ein Tag" + timeframes_only_distance["days"] = "{0} Tage" + timeframes_only_distance["week"] = "eine Woche" + timeframes_only_distance["month"] = "ein Monat" + timeframes_only_distance["months"] = "{0} Monate" + timeframes_only_distance["year"] = "ein Jahr" + timeframes_only_distance["years"] = "{0} Jahre" + + month_names = [ + "", + "Januar", + "Februar", + "März", + "April", + "Mai", + "Juni", + "Juli", + "August", + "September", + "Oktober", + "November", + "Dezember", + ] + + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mär", + "Apr", + "Mai", + "Jun", + "Jul", + "Aug", + "Sep", + "Okt", + "Nov", + "Dez", + ] + + day_names = [ + "", + "Montag", + "Dienstag", + "Mittwoch", + "Donnerstag", + "Freitag", + "Samstag", + "Sonntag", + ] + + day_abbreviations = ["", "Mo", "Di", "Mi", "Do", "Fr", "Sa", "So"] + + def _ordinal_number(self, n: int) -> str: + return f"{n}." 
+ + def describe( + self, + timeframe: TimeFrameLiteral, + delta: Union[int, float] = 0, + only_distance: bool = False, + ) -> str: + """Describes a delta within a timeframe in plain language. + + :param timeframe: a string representing a timeframe. + :param delta: a quantity representing a delta in a timeframe. + :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords + """ + + if not only_distance: + return super().describe(timeframe, delta, only_distance) + + # German uses a different case without 'in' or 'ago' + humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta))) + + return humanized + + +class GermanLocale(GermanBaseLocale, Locale): + names = ["de", "de-de"] + + +class SwissLocale(GermanBaseLocale, Locale): + names = ["de-ch"] + + +class AustrianLocale(GermanBaseLocale, Locale): + names = ["de-at"] + + month_names = [ + "", + "Jänner", + "Februar", + "März", + "April", + "Mai", + "Juni", + "Juli", + "August", + "September", + "Oktober", + "November", + "Dezember", + ] + + +class NorwegianLocale(Locale): + names = ["nb", "nb-no"] + + past = "for {0} siden" + future = "om {0}" + + timeframes = { + "now": "nå nettopp", + "second": "ett sekund", + "seconds": "{0} sekunder", + "minute": "ett minutt", + "minutes": "{0} minutter", + "hour": "en time", + "hours": "{0} timer", + "day": "en dag", + "days": "{0} dager", + "week": "en uke", + "weeks": "{0} uker", + "month": "en måned", + "months": "{0} måneder", + "year": "ett år", + "years": "{0} år", + } + + month_names = [ + "", + "januar", + "februar", + "mars", + "april", + "mai", + "juni", + "juli", + "august", + "september", + "oktober", + "november", + "desember", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "mai", + "jun", + "jul", + "aug", + "sep", + "okt", + "nov", + "des", + ] + + day_names = [ + "", + "mandag", + "tirsdag", + "onsdag", + "torsdag", + "fredag", + "lørdag", + "søndag", + ] + day_abbreviations = ["", "ma", 
"ti", "on", "to", "fr", "lø", "sø"] + + def _ordinal_number(self, n: int) -> str: + return f"{n}." + + +class NewNorwegianLocale(Locale): + names = ["nn", "nn-no"] + + past = "for {0} sidan" + future = "om {0}" + + timeframes = { + "now": "no nettopp", + "second": "eitt sekund", + "seconds": "{0} sekund", + "minute": "eitt minutt", + "minutes": "{0} minutt", + "hour": "ein time", + "hours": "{0} timar", + "day": "ein dag", + "days": "{0} dagar", + "week": "ei veke", + "weeks": "{0} veker", + "month": "ein månad", + "months": "{0} månader", + "year": "eitt år", + "years": "{0} år", + } + + month_names = [ + "", + "januar", + "februar", + "mars", + "april", + "mai", + "juni", + "juli", + "august", + "september", + "oktober", + "november", + "desember", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "mai", + "jun", + "jul", + "aug", + "sep", + "okt", + "nov", + "des", + ] + + day_names = [ + "", + "måndag", + "tysdag", + "onsdag", + "torsdag", + "fredag", + "laurdag", + "sundag", + ] + day_abbreviations = ["", "må", "ty", "on", "to", "fr", "la", "su"] + + def _ordinal_number(self, n: int) -> str: + return f"{n}." 
+ + +class PortugueseLocale(Locale): + names = ["pt", "pt-pt"] + + past = "há {0}" + future = "em {0}" + and_word = "e" + + timeframes = { + "now": "agora", + "second": "um segundo", + "seconds": "{0} segundos", + "minute": "um minuto", + "minutes": "{0} minutos", + "hour": "uma hora", + "hours": "{0} horas", + "day": "um dia", + "days": "{0} dias", + "week": "uma semana", + "weeks": "{0} semanas", + "month": "um mês", + "months": "{0} meses", + "year": "um ano", + "years": "{0} anos", + } + + month_names = [ + "", + "Janeiro", + "Fevereiro", + "Março", + "Abril", + "Maio", + "Junho", + "Julho", + "Agosto", + "Setembro", + "Outubro", + "Novembro", + "Dezembro", + ] + month_abbreviations = [ + "", + "Jan", + "Fev", + "Mar", + "Abr", + "Mai", + "Jun", + "Jul", + "Ago", + "Set", + "Out", + "Nov", + "Dez", + ] + + day_names = [ + "", + "Segunda-feira", + "Terça-feira", + "Quarta-feira", + "Quinta-feira", + "Sexta-feira", + "Sábado", + "Domingo", + ] + day_abbreviations = ["", "Seg", "Ter", "Qua", "Qui", "Sex", "Sab", "Dom"] + + +class BrazilianPortugueseLocale(PortugueseLocale): + names = ["pt-br"] + + past = "faz {0}" + + +class TagalogLocale(Locale): + names = ["tl", "tl-ph"] + + past = "nakaraang {0}" + future = "{0} mula ngayon" + + timeframes = { + "now": "ngayon lang", + "second": "isang segundo", + "seconds": "{0} segundo", + "minute": "isang minuto", + "minutes": "{0} minuto", + "hour": "isang oras", + "hours": "{0} oras", + "day": "isang araw", + "days": "{0} araw", + "week": "isang linggo", + "weeks": "{0} linggo", + "month": "isang buwan", + "months": "{0} buwan", + "year": "isang taon", + "years": "{0} taon", + } + + month_names = [ + "", + "Enero", + "Pebrero", + "Marso", + "Abril", + "Mayo", + "Hunyo", + "Hulyo", + "Agosto", + "Setyembre", + "Oktubre", + "Nobyembre", + "Disyembre", + ] + month_abbreviations = [ + "", + "Ene", + "Peb", + "Mar", + "Abr", + "May", + "Hun", + "Hul", + "Ago", + "Set", + "Okt", + "Nob", + "Dis", + ] + + day_names = [ + "", + 
"Lunes", + "Martes", + "Miyerkules", + "Huwebes", + "Biyernes", + "Sabado", + "Linggo", + ] + day_abbreviations = ["", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab", "Lin"] + + meridians = {"am": "nu", "pm": "nh", "AM": "ng umaga", "PM": "ng hapon"} + + def _ordinal_number(self, n: int) -> str: + return f"ika-{n}" + + +class VietnameseLocale(Locale): + names = ["vi", "vi-vn"] + + past = "{0} trước" + future = "{0} nữa" + + timeframes = { + "now": "hiện tại", + "second": "một giây", + "seconds": "{0} giây", + "minute": "một phút", + "minutes": "{0} phút", + "hour": "một giờ", + "hours": "{0} giờ", + "day": "một ngày", + "days": "{0} ngày", + "week": "một tuần", + "weeks": "{0} tuần", + "month": "một tháng", + "months": "{0} tháng", + "year": "một năm", + "years": "{0} năm", + } + + month_names = [ + "", + "Tháng Một", + "Tháng Hai", + "Tháng Ba", + "Tháng Tư", + "Tháng Năm", + "Tháng Sáu", + "Tháng Bảy", + "Tháng Tám", + "Tháng Chín", + "Tháng Mười", + "Tháng Mười Một", + "Tháng Mười Hai", + ] + month_abbreviations = [ + "", + "Tháng 1", + "Tháng 2", + "Tháng 3", + "Tháng 4", + "Tháng 5", + "Tháng 6", + "Tháng 7", + "Tháng 8", + "Tháng 9", + "Tháng 10", + "Tháng 11", + "Tháng 12", + ] + + day_names = [ + "", + "Thứ Hai", + "Thứ Ba", + "Thứ Tư", + "Thứ Năm", + "Thứ Sáu", + "Thứ Bảy", + "Chủ Nhật", + ] + day_abbreviations = ["", "Thứ 2", "Thứ 3", "Thứ 4", "Thứ 5", "Thứ 6", "Thứ 7", "CN"] + + +class TurkishLocale(Locale): + names = ["tr", "tr-tr"] + + past = "{0} önce" + future = "{0} sonra" + and_word = "ve" + + timeframes = { + "now": "şimdi", + "second": "bir saniye", + "seconds": "{0} saniye", + "minute": "bir dakika", + "minutes": "{0} dakika", + "hour": "bir saat", + "hours": "{0} saat", + "day": "bir gün", + "days": "{0} gün", + "week": "bir hafta", + "weeks": "{0} hafta", + "month": "bir ay", + "months": "{0} ay", + "year": "bir yıl", + "years": "{0} yıl", + } + + meridians = {"am": "öö", "pm": "ös", "AM": "ÖÖ", "PM": "ÖS"} + + month_names = [ + "", + "Ocak", + 
"Şubat", + "Mart", + "Nisan", + "Mayıs", + "Haziran", + "Temmuz", + "Ağustos", + "Eylül", + "Ekim", + "Kasım", + "Aralık", + ] + month_abbreviations = [ + "", + "Oca", + "Şub", + "Mar", + "Nis", + "May", + "Haz", + "Tem", + "Ağu", + "Eyl", + "Eki", + "Kas", + "Ara", + ] + + day_names = [ + "", + "Pazartesi", + "Salı", + "Çarşamba", + "Perşembe", + "Cuma", + "Cumartesi", + "Pazar", + ] + day_abbreviations = ["", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt", "Paz"] + + +class AzerbaijaniLocale(Locale): + names = ["az", "az-az"] + + past = "{0} əvvəl" + future = "{0} sonra" + + timeframes = { + "now": "indi", + "second": "bir saniyə", + "seconds": "{0} saniyə", + "minute": "bir dəqiqə", + "minutes": "{0} dəqiqə", + "hour": "bir saat", + "hours": "{0} saat", + "day": "bir gün", + "days": "{0} gün", + "week": "bir həftə", + "weeks": "{0} həftə", + "month": "bir ay", + "months": "{0} ay", + "year": "bir il", + "years": "{0} il", + } + + month_names = [ + "", + "Yanvar", + "Fevral", + "Mart", + "Aprel", + "May", + "İyun", + "İyul", + "Avqust", + "Sentyabr", + "Oktyabr", + "Noyabr", + "Dekabr", + ] + month_abbreviations = [ + "", + "Yan", + "Fev", + "Mar", + "Apr", + "May", + "İyn", + "İyl", + "Avq", + "Sen", + "Okt", + "Noy", + "Dek", + ] + + day_names = [ + "", + "Bazar ertəsi", + "Çərşənbə axşamı", + "Çərşənbə", + "Cümə axşamı", + "Cümə", + "Şənbə", + "Bazar", + ] + day_abbreviations = ["", "Ber", "Çax", "Çər", "Cax", "Cüm", "Şnb", "Bzr"] + + +class ArabicLocale(Locale): + names = [ + "ar", + "ar-ae", + "ar-bh", + "ar-dj", + "ar-eg", + "ar-eh", + "ar-er", + "ar-km", + "ar-kw", + "ar-ly", + "ar-om", + "ar-qa", + "ar-sa", + "ar-sd", + "ar-so", + "ar-ss", + "ar-td", + "ar-ye", + ] + + past = "منذ {0}" + future = "خلال {0}" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "الآن", + "second": "ثانية", + "seconds": {"2": "ثانيتين", "ten": "{0} ثوان", "higher": "{0} ثانية"}, + "minute": "دقيقة", + "minutes": {"2": "دقيقتين", "ten": 
"{0} دقائق", "higher": "{0} دقيقة"}, + "hour": "ساعة", + "hours": {"2": "ساعتين", "ten": "{0} ساعات", "higher": "{0} ساعة"}, + "day": "يوم", + "days": {"2": "يومين", "ten": "{0} أيام", "higher": "{0} يوم"}, + "month": "شهر", + "months": {"2": "شهرين", "ten": "{0} أشهر", "higher": "{0} شهر"}, + "year": "سنة", + "years": {"2": "سنتين", "ten": "{0} سنوات", "higher": "{0} سنة"}, + } + + month_names = [ + "", + "يناير", + "فبراير", + "مارس", + "أبريل", + "مايو", + "يونيو", + "يوليو", + "أغسطس", + "سبتمبر", + "أكتوبر", + "نوفمبر", + "ديسمبر", + ] + month_abbreviations = [ + "", + "يناير", + "فبراير", + "مارس", + "أبريل", + "مايو", + "يونيو", + "يوليو", + "أغسطس", + "سبتمبر", + "أكتوبر", + "نوفمبر", + "ديسمبر", + ] + + day_names = [ + "", + "الإثنين", + "الثلاثاء", + "الأربعاء", + "الخميس", + "الجمعة", + "السبت", + "الأحد", + ] + day_abbreviations = ["", "إثنين", "ثلاثاء", "أربعاء", "خميس", "جمعة", "سبت", "أحد"] + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + form = self.timeframes[timeframe] + delta = abs(delta) + if isinstance(form, Mapping): + if delta == 2: + form = form["2"] + elif 2 < delta <= 10: + form = form["ten"] + else: + form = form["higher"] + + return form.format(delta) + + +class LevantArabicLocale(ArabicLocale): + names = ["ar-iq", "ar-jo", "ar-lb", "ar-ps", "ar-sy"] + month_names = [ + "", + "كانون الثاني", + "شباط", + "آذار", + "نيسان", + "أيار", + "حزيران", + "تموز", + "آب", + "أيلول", + "تشرين الأول", + "تشرين الثاني", + "كانون الأول", + ] + month_abbreviations = [ + "", + "كانون الثاني", + "شباط", + "آذار", + "نيسان", + "أيار", + "حزيران", + "تموز", + "آب", + "أيلول", + "تشرين الأول", + "تشرين الثاني", + "كانون الأول", + ] + + +class AlgeriaTunisiaArabicLocale(ArabicLocale): + names = ["ar-tn", "ar-dz"] + month_names = [ + "", + "جانفي", + "فيفري", + "مارس", + "أفريل", + "ماي", + "جوان", + "جويلية", + "أوت", + "سبتمبر", + "أكتوبر", + "نوفمبر", + "ديسمبر", + ] + month_abbreviations = [ + "", + "جانفي", + "فيفري", + 
"مارس", + "أفريل", + "ماي", + "جوان", + "جويلية", + "أوت", + "سبتمبر", + "أكتوبر", + "نوفمبر", + "ديسمبر", + ] + + +class MauritaniaArabicLocale(ArabicLocale): + names = ["ar-mr"] + month_names = [ + "", + "يناير", + "فبراير", + "مارس", + "إبريل", + "مايو", + "يونيو", + "يوليو", + "أغشت", + "شتمبر", + "أكتوبر", + "نوفمبر", + "دجمبر", + ] + month_abbreviations = [ + "", + "يناير", + "فبراير", + "مارس", + "إبريل", + "مايو", + "يونيو", + "يوليو", + "أغشت", + "شتمبر", + "أكتوبر", + "نوفمبر", + "دجمبر", + ] + + +class MoroccoArabicLocale(ArabicLocale): + names = ["ar-ma"] + month_names = [ + "", + "يناير", + "فبراير", + "مارس", + "أبريل", + "ماي", + "يونيو", + "يوليوز", + "غشت", + "شتنبر", + "أكتوبر", + "نونبر", + "دجنبر", + ] + month_abbreviations = [ + "", + "يناير", + "فبراير", + "مارس", + "أبريل", + "ماي", + "يونيو", + "يوليوز", + "غشت", + "شتنبر", + "أكتوبر", + "نونبر", + "دجنبر", + ] + + +class IcelandicLocale(Locale): + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + form = self.timeframes[timeframe] + + if isinstance(form, Mapping): + if delta < 0: + form = form["past"] + elif delta > 0: + form = form["future"] + else: + raise ValueError( + "Icelandic Locale does not support units with a delta of zero. " + "Please consider making a contribution to fix this issue." 
+ ) + # FIXME: handle when delta is 0 + + return form.format(abs(delta)) + + names = ["is", "is-is"] + + past = "fyrir {0} síðan" + future = "eftir {0}" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "rétt í þessu", + "second": {"past": "sekúndu", "future": "sekúndu"}, + "seconds": {"past": "{0} nokkrum sekúndum", "future": "nokkrar sekúndur"}, + "minute": {"past": "einni mínútu", "future": "eina mínútu"}, + "minutes": {"past": "{0} mínútum", "future": "{0} mínútur"}, + "hour": {"past": "einum tíma", "future": "einn tíma"}, + "hours": {"past": "{0} tímum", "future": "{0} tíma"}, + "day": {"past": "einum degi", "future": "einn dag"}, + "days": {"past": "{0} dögum", "future": "{0} daga"}, + "month": {"past": "einum mánuði", "future": "einn mánuð"}, + "months": {"past": "{0} mánuðum", "future": "{0} mánuði"}, + "year": {"past": "einu ári", "future": "eitt ár"}, + "years": {"past": "{0} árum", "future": "{0} ár"}, + } + + meridians = {"am": "f.h.", "pm": "e.h.", "AM": "f.h.", "PM": "e.h."} + + month_names = [ + "", + "janúar", + "febrúar", + "mars", + "apríl", + "maí", + "júní", + "júlí", + "ágúst", + "september", + "október", + "nóvember", + "desember", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "maí", + "jún", + "júl", + "ágú", + "sep", + "okt", + "nóv", + "des", + ] + + day_names = [ + "", + "mánudagur", + "þriðjudagur", + "miðvikudagur", + "fimmtudagur", + "föstudagur", + "laugardagur", + "sunnudagur", + ] + day_abbreviations = ["", "mán", "þri", "mið", "fim", "fös", "lau", "sun"] + + +class DanishLocale(Locale): + names = ["da", "da-dk"] + + past = "for {0} siden" + future = "om {0}" + and_word = "og" + + timeframes = { + "now": "lige nu", + "second": "et sekund", + "seconds": "{0} sekunder", + "minute": "et minut", + "minutes": "{0} minutter", + "hour": "en time", + "hours": "{0} timer", + "day": "en dag", + "days": "{0} dage", + "week": "en uge", + "weeks": "{0} uger", + "month": "en 
måned", + "months": "{0} måneder", + "year": "et år", + "years": "{0} år", + } + + month_names = [ + "", + "januar", + "februar", + "marts", + "april", + "maj", + "juni", + "juli", + "august", + "september", + "oktober", + "november", + "december", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "maj", + "jun", + "jul", + "aug", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "mandag", + "tirsdag", + "onsdag", + "torsdag", + "fredag", + "lørdag", + "søndag", + ] + day_abbreviations = ["", "man", "tir", "ons", "tor", "fre", "lør", "søn"] + + def _ordinal_number(self, n: int) -> str: + return f"{n}." + + +class MalayalamLocale(Locale): + names = ["ml"] + + past = "{0} മുമ്പ്" + future = "{0} ശേഷം" + + timeframes = { + "now": "ഇപ്പോൾ", + "second": "ഒരു നിമിഷം", + "seconds": "{0} സെക്കന്റ്‌", + "minute": "ഒരു മിനിറ്റ്", + "minutes": "{0} മിനിറ്റ്", + "hour": "ഒരു മണിക്കൂർ", + "hours": "{0} മണിക്കൂർ", + "day": "ഒരു ദിവസം ", + "days": "{0} ദിവസം ", + "month": "ഒരു മാസം ", + "months": "{0} മാസം ", + "year": "ഒരു വർഷം ", + "years": "{0} വർഷം ", + } + + meridians = { + "am": "രാവിലെ", + "pm": "ഉച്ചക്ക് ശേഷം", + "AM": "രാവിലെ", + "PM": "ഉച്ചക്ക് ശേഷം", + } + + month_names = [ + "", + "ജനുവരി", + "ഫെബ്രുവരി", + "മാർച്ച്‌", + "ഏപ്രിൽ ", + "മെയ്‌ ", + "ജൂണ്‍", + "ജൂലൈ", + "ഓഗസ്റ്റ്‌", + "സെപ്റ്റംബർ", + "ഒക്ടോബർ", + "നവംബർ", + "ഡിസംബർ", + ] + month_abbreviations = [ + "", + "ജനു", + "ഫെബ് ", + "മാർ", + "ഏപ്രിൽ", + "മേയ്", + "ജൂണ്‍", + "ജൂലൈ", + "ഓഗസ്റ", + "സെപ്റ്റ", + "ഒക്ടോ", + "നവം", + "ഡിസം", + ] + + day_names = ["", "തിങ്കള്‍", "ചൊവ്വ", "ബുധന്‍", "വ്യാഴം", "വെള്ളി", "ശനി", "ഞായര്‍"] + day_abbreviations = [ + "", + "തിങ്കള്‍", + "ചൊവ്വ", + "ബുധന്‍", + "വ്യാഴം", + "വെള്ളി", + "ശനി", + "ഞായര്‍", + ] + + +class HindiLocale(Locale): + names = ["hi", "hi-in"] + + past = "{0} पहले" + future = "{0} बाद" + + timeframes = { + "now": "अभी", + "second": "एक पल", + "seconds": "{0} सेकंड्", + "minute": "एक मिनट ", + "minutes": "{0} मिनट ", + 
"hour": "एक घंटा", + "hours": "{0} घंटे", + "day": "एक दिन", + "days": "{0} दिन", + "month": "एक माह ", + "months": "{0} महीने ", + "year": "एक वर्ष ", + "years": "{0} साल ", + } + + meridians = {"am": "सुबह", "pm": "शाम", "AM": "सुबह", "PM": "शाम"} + + month_names = [ + "", + "जनवरी", + "फरवरी", + "मार्च", + "अप्रैल ", + "मई", + "जून", + "जुलाई", + "अगस्त", + "सितंबर", + "अक्टूबर", + "नवंबर", + "दिसंबर", + ] + month_abbreviations = [ + "", + "जन", + "फ़र", + "मार्च", + "अप्रै", + "मई", + "जून", + "जुलाई", + "आग", + "सित", + "अकत", + "नवे", + "दिस", + ] + + day_names = [ + "", + "सोमवार", + "मंगलवार", + "बुधवार", + "गुरुवार", + "शुक्रवार", + "शनिवार", + "रविवार", + ] + day_abbreviations = ["", "सोम", "मंगल", "बुध", "गुरुवार", "शुक्र", "शनि", "रवि"] + + +class CzechLocale(Locale): + names = ["cs", "cs-cz"] + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "Teď", + "second": {"past": "vteřina", "future": "vteřina"}, + "seconds": { + "zero": "vteřina", + "past": "{0} sekundami", + "future-singular": "{0} sekundy", + "future-paucal": "{0} sekund", + }, + "minute": {"past": "minutou", "future": "minutu"}, + "minutes": { + "zero": "{0} minut", + "past": "{0} minutami", + "future-singular": "{0} minuty", + "future-paucal": "{0} minut", + }, + "hour": {"past": "hodinou", "future": "hodinu"}, + "hours": { + "zero": "{0} hodin", + "past": "{0} hodinami", + "future-singular": "{0} hodiny", + "future-paucal": "{0} hodin", + }, + "day": {"past": "dnem", "future": "den"}, + "days": { + "zero": "{0} dnů", + "past": "{0} dny", + "future-singular": "{0} dny", + "future-paucal": "{0} dnů", + }, + "week": {"past": "týdnem", "future": "týden"}, + "weeks": { + "zero": "{0} týdnů", + "past": "{0} týdny", + "future-singular": "{0} týdny", + "future-paucal": "{0} týdnů", + }, + "month": {"past": "měsícem", "future": "měsíc"}, + "months": { + "zero": "{0} měsíců", + "past": "{0} měsíci", + "future-singular": "{0} měsíce", + "future-paucal": 
"{0} měsíců", + }, + "year": {"past": "rokem", "future": "rok"}, + "years": { + "zero": "{0} let", + "past": "{0} lety", + "future-singular": "{0} roky", + "future-paucal": "{0} let", + }, + } + + past = "Před {0}" + future = "Za {0}" + + month_names = [ + "", + "leden", + "únor", + "březen", + "duben", + "květen", + "červen", + "červenec", + "srpen", + "září", + "říjen", + "listopad", + "prosinec", + ] + month_abbreviations = [ + "", + "led", + "úno", + "bře", + "dub", + "kvě", + "čvn", + "čvc", + "srp", + "zář", + "říj", + "lis", + "pro", + ] + + day_names = [ + "", + "pondělí", + "úterý", + "středa", + "čtvrtek", + "pátek", + "sobota", + "neděle", + ] + day_abbreviations = ["", "po", "út", "st", "čt", "pá", "so", "ne"] + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + """Czech aware time frame format function, takes into account + the differences between past and future forms.""" + abs_delta = abs(delta) + form = self.timeframes[timeframe] + + if isinstance(form, str): + return form.format(abs_delta) + + if delta == 0: + key = "zero" # And *never* use 0 in the singular! 
+ elif delta < 0: + key = "past" + else: + # Needed since both regular future and future-singular and future-paucal cases + if "future-singular" not in form: + key = "future" + elif 2 <= abs_delta % 10 <= 4 and ( + abs_delta % 100 < 10 or abs_delta % 100 >= 20 + ): + key = "future-singular" + else: + key = "future-paucal" + + form: str = form[key] + return form.format(abs_delta) + + +class SlovakLocale(Locale): + names = ["sk", "sk-sk"] + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "Teraz", + "second": {"past": "sekundou", "future": "sekundu"}, + "seconds": { + "zero": "{0} sekúnd", + "past": "{0} sekundami", + "future-singular": "{0} sekundy", + "future-paucal": "{0} sekúnd", + }, + "minute": {"past": "minútou", "future": "minútu"}, + "minutes": { + "zero": "{0} minút", + "past": "{0} minútami", + "future-singular": "{0} minúty", + "future-paucal": "{0} minút", + }, + "hour": {"past": "hodinou", "future": "hodinu"}, + "hours": { + "zero": "{0} hodín", + "past": "{0} hodinami", + "future-singular": "{0} hodiny", + "future-paucal": "{0} hodín", + }, + "day": {"past": "dňom", "future": "deň"}, + "days": { + "zero": "{0} dní", + "past": "{0} dňami", + "future-singular": "{0} dni", + "future-paucal": "{0} dní", + }, + "week": {"past": "týždňom", "future": "týždeň"}, + "weeks": { + "zero": "{0} týždňov", + "past": "{0} týždňami", + "future-singular": "{0} týždne", + "future-paucal": "{0} týždňov", + }, + "month": {"past": "mesiacom", "future": "mesiac"}, + "months": { + "zero": "{0} mesiacov", + "past": "{0} mesiacmi", + "future-singular": "{0} mesiace", + "future-paucal": "{0} mesiacov", + }, + "year": {"past": "rokom", "future": "rok"}, + "years": { + "zero": "{0} rokov", + "past": "{0} rokmi", + "future-singular": "{0} roky", + "future-paucal": "{0} rokov", + }, + } + + past = "Pred {0}" + future = "O {0}" + and_word = "a" + + month_names = [ + "", + "január", + "február", + "marec", + "apríl", + "máj", + "jún", + 
"júl", + "august", + "september", + "október", + "november", + "december", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "máj", + "jún", + "júl", + "aug", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "pondelok", + "utorok", + "streda", + "štvrtok", + "piatok", + "sobota", + "nedeľa", + ] + day_abbreviations = ["", "po", "ut", "st", "št", "pi", "so", "ne"] + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + """Slovak aware time frame format function, takes into account + the differences between past and future forms.""" + abs_delta = abs(delta) + form = self.timeframes[timeframe] + + if isinstance(form, str): + return form.format(abs_delta) + + if delta == 0: + key = "zero" # And *never* use 0 in the singular! + elif delta < 0: + key = "past" + else: + if "future-singular" not in form: + key = "future" + elif 2 <= abs_delta % 10 <= 4 and ( + abs_delta % 100 < 10 or abs_delta % 100 >= 20 + ): + key = "future-singular" + else: + key = "future-paucal" + + form: str = form[key] + return form.format(abs_delta) + + +class FarsiLocale(Locale): + names = ["fa", "fa-ir"] + + past = "{0} قبل" + future = "در {0}" + + timeframes = { + "now": "اکنون", + "second": "یک لحظه", + "seconds": "{0} ثانیه", + "minute": "یک دقیقه", + "minutes": "{0} دقیقه", + "hour": "یک ساعت", + "hours": "{0} ساعت", + "day": "یک روز", + "days": "{0} روز", + "month": "یک ماه", + "months": "{0} ماه", + "year": "یک سال", + "years": "{0} سال", + } + + meridians = { + "am": "قبل از ظهر", + "pm": "بعد از ظهر", + "AM": "قبل از ظهر", + "PM": "بعد از ظهر", + } + + month_names = [ + "", + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + ] + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + ] + + day_names = [ + "", + "دو شنبه", + "سه شنبه", + 
"چهارشنبه", + "پنجشنبه", + "جمعه", + "شنبه", + "یکشنبه", + ] + day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] + + +class HebrewLocale(Locale): + names = ["he", "he-il"] + + past = "לפני {0}" + future = "בעוד {0}" + and_word = "ו" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "הרגע", + "second": "שנייה", + "seconds": "{0} שניות", + "minute": "דקה", + "minutes": "{0} דקות", + "hour": "שעה", + "hours": {"2": "שעתיים", "ten": "{0} שעות", "higher": "{0} שעות"}, + "day": "יום", + "days": {"2": "יומיים", "ten": "{0} ימים", "higher": "{0} יום"}, + "week": "שבוע", + "weeks": {"2": "שבועיים", "ten": "{0} שבועות", "higher": "{0} שבועות"}, + "month": "חודש", + "months": {"2": "חודשיים", "ten": "{0} חודשים", "higher": "{0} חודשים"}, + "year": "שנה", + "years": {"2": "שנתיים", "ten": "{0} שנים", "higher": "{0} שנה"}, + } + + meridians = { + "am": 'לפנ"צ', + "pm": 'אחר"צ', + "AM": "לפני הצהריים", + "PM": "אחרי הצהריים", + } + + month_names = [ + "", + "ינואר", + "פברואר", + "מרץ", + "אפריל", + "מאי", + "יוני", + "יולי", + "אוגוסט", + "ספטמבר", + "אוקטובר", + "נובמבר", + "דצמבר", + ] + month_abbreviations = [ + "", + "ינו׳", + "פבר׳", + "מרץ", + "אפר׳", + "מאי", + "יוני", + "יולי", + "אוג׳", + "ספט׳", + "אוק׳", + "נוב׳", + "דצמ׳", + ] + + day_names = ["", "שני", "שלישי", "רביעי", "חמישי", "שישי", "שבת", "ראשון"] + day_abbreviations = ["", "ב׳", "ג׳", "ד׳", "ה׳", "ו׳", "ש׳", "א׳"] + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + form = self.timeframes[timeframe] + delta = abs(delta) + if isinstance(form, Mapping): + if delta == 2: + form = form["2"] + elif delta == 0 or 2 < delta <= 10: + form = form["ten"] + else: + form = form["higher"] + + return form.format(delta) + + def describe_multi( + self, + timeframes: Sequence[Tuple[TimeFrameLiteral, Union[int, float]]], + only_distance: bool = False, + ) -> str: + """Describes a delta within multiple timeframes in plain 
language. + In Hebrew, the and word behaves a bit differently. + + :param timeframes: a list of string, quantity pairs each representing a timeframe and delta. + :param only_distance: return only distance eg: "2 hours and 11 seconds" without "in" or "ago" keywords + """ + + humanized = "" + for index, (timeframe, delta) in enumerate(timeframes): + last_humanized = self._format_timeframe(timeframe, trunc(delta)) + if index == 0: + humanized = last_humanized + elif index == len(timeframes) - 1: # Must have at least 2 items + humanized += " " + self.and_word + if last_humanized[0].isdecimal(): + humanized += "־" + humanized += last_humanized + else: # Don't add for the last one + humanized += ", " + last_humanized + + if not only_distance: + humanized = self._format_relative(humanized, timeframe, trunc(delta)) + + return humanized + + +class MarathiLocale(Locale): + names = ["mr"] + + past = "{0} आधी" + future = "{0} नंतर" + + timeframes = { + "now": "सद्य", + "second": "एक सेकंद", + "seconds": "{0} सेकंद", + "minute": "एक मिनिट ", + "minutes": "{0} मिनिट ", + "hour": "एक तास", + "hours": "{0} तास", + "day": "एक दिवस", + "days": "{0} दिवस", + "month": "एक महिना ", + "months": "{0} महिने ", + "year": "एक वर्ष ", + "years": "{0} वर्ष ", + } + + meridians = {"am": "सकाळ", "pm": "संध्याकाळ", "AM": "सकाळ", "PM": "संध्याकाळ"} + + month_names = [ + "", + "जानेवारी", + "फेब्रुवारी", + "मार्च", + "एप्रिल", + "मे", + "जून", + "जुलै", + "अॉगस्ट", + "सप्टेंबर", + "अॉक्टोबर", + "नोव्हेंबर", + "डिसेंबर", + ] + month_abbreviations = [ + "", + "जान", + "फेब्रु", + "मार्च", + "एप्रि", + "मे", + "जून", + "जुलै", + "अॉग", + "सप्टें", + "अॉक्टो", + "नोव्हें", + "डिसें", + ] + + day_names = [ + "", + "सोमवार", + "मंगळवार", + "बुधवार", + "गुरुवार", + "शुक्रवार", + "शनिवार", + "रविवार", + ] + day_abbreviations = ["", "सोम", "मंगळ", "बुध", "गुरु", "शुक्र", "शनि", "रवि"] + + +class CatalanLocale(Locale): + names = ["ca", "ca-es", "ca-ad", "ca-fr", "ca-it"] + past = "Fa {0}" + future = "En 
{0}" + and_word = "i" + + timeframes = { + "now": "Ara mateix", + "second": "un segon", + "seconds": "{0} segons", + "minute": "un minut", + "minutes": "{0} minuts", + "hour": "una hora", + "hours": "{0} hores", + "day": "un dia", + "days": "{0} dies", + "month": "un mes", + "months": "{0} mesos", + "year": "un any", + "years": "{0} anys", + } + + month_names = [ + "", + "gener", + "febrer", + "març", + "abril", + "maig", + "juny", + "juliol", + "agost", + "setembre", + "octubre", + "novembre", + "desembre", + ] + month_abbreviations = [ + "", + "gen.", + "febr.", + "març", + "abr.", + "maig", + "juny", + "jul.", + "ag.", + "set.", + "oct.", + "nov.", + "des.", + ] + day_names = [ + "", + "dilluns", + "dimarts", + "dimecres", + "dijous", + "divendres", + "dissabte", + "diumenge", + ] + day_abbreviations = [ + "", + "dl.", + "dt.", + "dc.", + "dj.", + "dv.", + "ds.", + "dg.", + ] + + +class BasqueLocale(Locale): + names = ["eu", "eu-eu"] + past = "duela {0}" + future = "{0}" # I don't know what's the right phrase in Basque for the future. 
+ + timeframes = { + "now": "Orain", + "second": "segundo bat", + "seconds": "{0} segundu", + "minute": "minutu bat", + "minutes": "{0} minutu", + "hour": "ordu bat", + "hours": "{0} ordu", + "day": "egun bat", + "days": "{0} egun", + "month": "hilabete bat", + "months": "{0} hilabet", + "year": "urte bat", + "years": "{0} urte", + } + + month_names = [ + "", + "urtarrilak", + "otsailak", + "martxoak", + "apirilak", + "maiatzak", + "ekainak", + "uztailak", + "abuztuak", + "irailak", + "urriak", + "azaroak", + "abenduak", + ] + month_abbreviations = [ + "", + "urt", + "ots", + "mar", + "api", + "mai", + "eka", + "uzt", + "abu", + "ira", + "urr", + "aza", + "abe", + ] + day_names = [ + "", + "astelehena", + "asteartea", + "asteazkena", + "osteguna", + "ostirala", + "larunbata", + "igandea", + ] + day_abbreviations = ["", "al", "ar", "az", "og", "ol", "lr", "ig"] + + +class HungarianLocale(Locale): + names = ["hu", "hu-hu"] + + past = "{0} ezelőtt" + future = "{0} múlva" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "éppen most", + "second": {"past": "egy második", "future": "egy második"}, + "seconds": {"past": "{0} másodpercekkel", "future": "{0} pár másodperc"}, + "minute": {"past": "egy perccel", "future": "egy perc"}, + "minutes": {"past": "{0} perccel", "future": "{0} perc"}, + "hour": {"past": "egy órával", "future": "egy óra"}, + "hours": {"past": "{0} órával", "future": "{0} óra"}, + "day": {"past": "egy nappal", "future": "egy nap"}, + "days": {"past": "{0} nappal", "future": "{0} nap"}, + "month": {"past": "egy hónappal", "future": "egy hónap"}, + "months": {"past": "{0} hónappal", "future": "{0} hónap"}, + "year": {"past": "egy évvel", "future": "egy év"}, + "years": {"past": "{0} évvel", "future": "{0} év"}, + } + + month_names = [ + "", + "január", + "február", + "március", + "április", + "május", + "június", + "július", + "augusztus", + "szeptember", + "október", + "november", + "december", + ] + 
month_abbreviations = [ + "", + "jan", + "febr", + "márc", + "ápr", + "máj", + "jún", + "júl", + "aug", + "szept", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "hétfő", + "kedd", + "szerda", + "csütörtök", + "péntek", + "szombat", + "vasárnap", + ] + day_abbreviations = ["", "hét", "kedd", "szer", "csüt", "pént", "szom", "vas"] + + meridians = {"am": "de", "pm": "du", "AM": "DE", "PM": "DU"} + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + form = self.timeframes[timeframe] + + if isinstance(form, Mapping): + if delta > 0: + form = form["future"] + else: + form = form["past"] + + return form.format(abs(delta)) + + +class EsperantoLocale(Locale): + names = ["eo", "eo-xx"] + past = "antaŭ {0}" + future = "post {0}" + + timeframes = { + "now": "nun", + "second": "sekundo", + "seconds": "{0} kelkaj sekundoj", + "minute": "unu minuto", + "minutes": "{0} minutoj", + "hour": "un horo", + "hours": "{0} horoj", + "day": "unu tago", + "days": "{0} tagoj", + "month": "unu monato", + "months": "{0} monatoj", + "year": "unu jaro", + "years": "{0} jaroj", + } + + month_names = [ + "", + "januaro", + "februaro", + "marto", + "aprilo", + "majo", + "junio", + "julio", + "aŭgusto", + "septembro", + "oktobro", + "novembro", + "decembro", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "maj", + "jun", + "jul", + "aŭg", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "lundo", + "mardo", + "merkredo", + "ĵaŭdo", + "vendredo", + "sabato", + "dimanĉo", + ] + day_abbreviations = ["", "lun", "mar", "mer", "ĵaŭ", "ven", "sab", "dim"] + + meridians = {"am": "atm", "pm": "ptm", "AM": "ATM", "PM": "PTM"} + + ordinal_day_re = r"((?P[1-3]?[0-9](?=a))a)" + + def _ordinal_number(self, n: int) -> str: + return f"{n}a" + + +class ThaiLocale(Locale): + names = ["th", "th-th"] + + past = "{0} ที่ผ่านมา" + future = "ในอีก {0}" + + timeframes = { + "now": "ขณะนี้", + "second": "วินาที", + "seconds": "{0} 
ไม่กี่วินาที", + "minute": "1 นาที", + "minutes": "{0} นาที", + "hour": "1 ชั่วโมง", + "hours": "{0} ชั่วโมง", + "day": "1 วัน", + "days": "{0} วัน", + "month": "1 เดือน", + "months": "{0} เดือน", + "year": "1 ปี", + "years": "{0} ปี", + } + + month_names = [ + "", + "มกราคม", + "กุมภาพันธ์", + "มีนาคม", + "เมษายน", + "พฤษภาคม", + "มิถุนายน", + "กรกฎาคม", + "สิงหาคม", + "กันยายน", + "ตุลาคม", + "พฤศจิกายน", + "ธันวาคม", + ] + month_abbreviations = [ + "", + "ม.ค.", + "ก.พ.", + "มี.ค.", + "เม.ย.", + "พ.ค.", + "มิ.ย.", + "ก.ค.", + "ส.ค.", + "ก.ย.", + "ต.ค.", + "พ.ย.", + "ธ.ค.", + ] + + day_names = ["", "จันทร์", "อังคาร", "พุธ", "พฤหัสบดี", "ศุกร์", "เสาร์", "อาทิตย์"] + day_abbreviations = ["", "จ", "อ", "พ", "พฤ", "ศ", "ส", "อา"] + + meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"} + + BE_OFFSET = 543 + + def year_full(self, year: int) -> str: + """Thai always use Buddhist Era (BE) which is CE + 543""" + year += self.BE_OFFSET + return f"{year:04d}" + + def year_abbreviation(self, year: int) -> str: + """Thai always use Buddhist Era (BE) which is CE + 543""" + year += self.BE_OFFSET + return f"{year:04d}"[2:] + + def _format_relative( + self, + humanized: str, + timeframe: TimeFrameLiteral, + delta: Union[float, int], + ) -> str: + """Thai normally doesn't have any space between words""" + if timeframe == "now": + return humanized + + direction = self.past if delta < 0 else self.future + relative_string = direction.format(humanized) + + if timeframe == "seconds": + relative_string = relative_string.replace(" ", "") + + return relative_string + + +class LaotianLocale(Locale): + + names = ["lo", "lo-la"] + + past = "{0} ກ່ອນຫນ້ານີ້" + future = "ໃນ {0}" + + timeframes = { + "now": "ດຽວນີ້", + "second": "ວິນາທີ", + "seconds": "{0} ວິນາທີ", + "minute": "ນາທີ", + "minutes": "{0} ນາທີ", + "hour": "ຊົ່ວໂມງ", + "hours": "{0} ຊົ່ວໂມງ", + "day": "ມື້", + "days": "{0} ມື້", + "week": "ອາທິດ", + "weeks": "{0} ອາທິດ", + "month": "ເດືອນ", + "months": "{0} ເດືອນ", + 
"year": "ປີ", + "years": "{0} ປີ", + } + + month_names = [ + "", + "ມັງກອນ", # mangkon + "ກຸມພາ", # kumpha + "ມີນາ", # mina + "ເມສາ", # mesa + "ພຶດສະພາ", # phudsapha + "ມິຖຸນາ", # mithuna + "ກໍລະກົດ", # kolakod + "ສິງຫາ", # singha + "ກັນຍາ", # knaia + "ຕຸລາ", # tula + "ພະຈິກ", # phachik + "ທັນວາ", # thanuaa + ] + month_abbreviations = [ + "", + "ມັງກອນ", + "ກຸມພາ", + "ມີນາ", + "ເມສາ", + "ພຶດສະພາ", + "ມິຖຸນາ", + "ກໍລະກົດ", + "ສິງຫາ", + "ກັນຍາ", + "ຕຸລາ", + "ພະຈິກ", + "ທັນວາ", + ] + + day_names = [ + "", + "ວັນຈັນ", # vanchan + "ວັນອັງຄານ", # vnoangkhan + "ວັນພຸດ", # vanphud + "ວັນພະຫັດ", # vanphahad + "ວັນ​ສຸກ", # vansuk + "ວັນເສົາ", # vansao + "ວັນອາທິດ", # vnoathid + ] + day_abbreviations = [ + "", + "ວັນຈັນ", + "ວັນອັງຄານ", + "ວັນພຸດ", + "ວັນພະຫັດ", + "ວັນ​ສຸກ", + "ວັນເສົາ", + "ວັນອາທິດ", + ] + + BE_OFFSET = 543 + + def year_full(self, year: int) -> str: + """Lao always use Buddhist Era (BE) which is CE + 543""" + year += self.BE_OFFSET + return f"{year:04d}" + + def year_abbreviation(self, year: int) -> str: + """Lao always use Buddhist Era (BE) which is CE + 543""" + year += self.BE_OFFSET + return f"{year:04d}"[2:] + + def _format_relative( + self, + humanized: str, + timeframe: TimeFrameLiteral, + delta: Union[float, int], + ) -> str: + """Lao normally doesn't have any space between words""" + if timeframe == "now": + return humanized + + direction = self.past if delta < 0 else self.future + relative_string = direction.format(humanized) + + if timeframe == "seconds": + relative_string = relative_string.replace(" ", "") + + return relative_string + + +class BengaliLocale(Locale): + names = ["bn", "bn-bd", "bn-in"] + + past = "{0} আগে" + future = "{0} পরে" + + timeframes = { + "now": "এখন", + "second": "একটি দ্বিতীয়", + "seconds": "{0} সেকেন্ড", + "minute": "এক মিনিট", + "minutes": "{0} মিনিট", + "hour": "এক ঘণ্টা", + "hours": "{0} ঘণ্টা", + "day": "এক দিন", + "days": "{0} দিন", + "month": "এক মাস", + "months": "{0} মাস ", + "year": "এক বছর", + "years": "{0} 
বছর", + } + + meridians = {"am": "সকাল", "pm": "বিকাল", "AM": "সকাল", "PM": "বিকাল"} + + month_names = [ + "", + "জানুয়ারি", + "ফেব্রুয়ারি", + "মার্চ", + "এপ্রিল", + "মে", + "জুন", + "জুলাই", + "আগস্ট", + "সেপ্টেম্বর", + "অক্টোবর", + "নভেম্বর", + "ডিসেম্বর", + ] + month_abbreviations = [ + "", + "জানু", + "ফেব", + "মার্চ", + "এপ্রি", + "মে", + "জুন", + "জুল", + "অগা", + "সেপ্ট", + "অক্টো", + "নভে", + "ডিসে", + ] + + day_names = [ + "", + "সোমবার", + "মঙ্গলবার", + "বুধবার", + "বৃহস্পতিবার", + "শুক্রবার", + "শনিবার", + "রবিবার", + ] + day_abbreviations = ["", "সোম", "মঙ্গল", "বুধ", "বৃহঃ", "শুক্র", "শনি", "রবি"] + + def _ordinal_number(self, n: int) -> str: + if n > 10 or n == 0: + return f"{n}তম" + if n in [1, 5, 7, 8, 9, 10]: + return f"{n}ম" + if n in [2, 3]: + return f"{n}য়" + if n == 4: + return f"{n}র্থ" + if n == 6: + return f"{n}ষ্ঠ" + + +class RomanshLocale(Locale): + names = ["rm", "rm-ch"] + + past = "avant {0}" + future = "en {0}" + + timeframes = { + "now": "en quest mument", + "second": "in secunda", + "seconds": "{0} secundas", + "minute": "ina minuta", + "minutes": "{0} minutas", + "hour": "in'ura", + "hours": "{0} ura", + "day": "in di", + "days": "{0} dis", + "month": "in mais", + "months": "{0} mais", + "year": "in onn", + "years": "{0} onns", + } + + month_names = [ + "", + "schaner", + "favrer", + "mars", + "avrigl", + "matg", + "zercladur", + "fanadur", + "avust", + "settember", + "october", + "november", + "december", + ] + + month_abbreviations = [ + "", + "schan", + "fav", + "mars", + "avr", + "matg", + "zer", + "fan", + "avu", + "set", + "oct", + "nov", + "dec", + ] + + day_names = [ + "", + "glindesdi", + "mardi", + "mesemna", + "gievgia", + "venderdi", + "sonda", + "dumengia", + ] + + day_abbreviations = ["", "gli", "ma", "me", "gie", "ve", "so", "du"] + + +class RomanianLocale(Locale): + names = ["ro", "ro-ro"] + + past = "{0} în urmă" + future = "peste {0}" + and_word = "și" + + timeframes = { + "now": "acum", + "second": "o secunda", 
+ "seconds": "{0} câteva secunde", + "minute": "un minut", + "minutes": "{0} minute", + "hour": "o oră", + "hours": "{0} ore", + "day": "o zi", + "days": "{0} zile", + "month": "o lună", + "months": "{0} luni", + "year": "un an", + "years": "{0} ani", + } + + month_names = [ + "", + "ianuarie", + "februarie", + "martie", + "aprilie", + "mai", + "iunie", + "iulie", + "august", + "septembrie", + "octombrie", + "noiembrie", + "decembrie", + ] + month_abbreviations = [ + "", + "ian", + "febr", + "mart", + "apr", + "mai", + "iun", + "iul", + "aug", + "sept", + "oct", + "nov", + "dec", + ] + + day_names = [ + "", + "luni", + "marți", + "miercuri", + "joi", + "vineri", + "sâmbătă", + "duminică", + ] + day_abbreviations = ["", "Lun", "Mar", "Mie", "Joi", "Vin", "Sâm", "Dum"] + + +class SlovenianLocale(Locale): + names = ["sl", "sl-si"] + + past = "pred {0}" + future = "čez {0}" + and_word = "in" + + timeframes = { + "now": "zdaj", + "second": "sekundo", + "seconds": "{0} sekund", + "minute": "minuta", + "minutes": "{0} minutami", + "hour": "uro", + "hours": "{0} ur", + "day": "dan", + "days": "{0} dni", + "month": "mesec", + "months": "{0} mesecev", + "year": "leto", + "years": "{0} let", + } + + meridians = {"am": "", "pm": "", "AM": "", "PM": ""} + + month_names = [ + "", + "Januar", + "Februar", + "Marec", + "April", + "Maj", + "Junij", + "Julij", + "Avgust", + "September", + "Oktober", + "November", + "December", + ] + + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mar", + "Apr", + "Maj", + "Jun", + "Jul", + "Avg", + "Sep", + "Okt", + "Nov", + "Dec", + ] + + day_names = [ + "", + "Ponedeljek", + "Torek", + "Sreda", + "Četrtek", + "Petek", + "Sobota", + "Nedelja", + ] + + day_abbreviations = ["", "Pon", "Tor", "Sre", "Čet", "Pet", "Sob", "Ned"] + + +class IndonesianLocale(Locale): + names = ["id", "id-id"] + + past = "{0} yang lalu" + future = "dalam {0}" + and_word = "dan" + + timeframes = { + "now": "baru saja", + "second": "1 sebentar", + "seconds": "{0} 
detik", + "minute": "1 menit", + "minutes": "{0} menit", + "hour": "1 jam", + "hours": "{0} jam", + "day": "1 hari", + "days": "{0} hari", + "week": "1 minggu", + "weeks": "{0} minggu", + "month": "1 bulan", + "months": "{0} bulan", + "quarter": "1 kuartal", + "quarters": "{0} kuartal", + "year": "1 tahun", + "years": "{0} tahun", + } + + meridians = {"am": "", "pm": "", "AM": "", "PM": ""} + + month_names = [ + "", + "Januari", + "Februari", + "Maret", + "April", + "Mei", + "Juni", + "Juli", + "Agustus", + "September", + "Oktober", + "November", + "Desember", + ] + + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mar", + "Apr", + "Mei", + "Jun", + "Jul", + "Ags", + "Sept", + "Okt", + "Nov", + "Des", + ] + + day_names = ["", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu", "Minggu"] + + day_abbreviations = [ + "", + "Senin", + "Selasa", + "Rabu", + "Kamis", + "Jumat", + "Sabtu", + "Minggu", + ] + + +class NepaliLocale(Locale): + names = ["ne", "ne-np"] + + past = "{0} पहिले" + future = "{0} पछी" + + timeframes = { + "now": "अहिले", + "second": "एक सेकेन्ड", + "seconds": "{0} सेकण्ड", + "minute": "मिनेट", + "minutes": "{0} मिनेट", + "hour": "एक घण्टा", + "hours": "{0} घण्टा", + "day": "एक दिन", + "days": "{0} दिन", + "month": "एक महिना", + "months": "{0} महिना", + "year": "एक बर्ष", + "years": "{0} बर्ष", + } + + meridians = {"am": "पूर्वाह्न", "pm": "अपरान्ह", "AM": "पूर्वाह्न", "PM": "अपरान्ह"} + + month_names = [ + "", + "जनवरी", + "फेब्रुअरी", + "मार्च", + "एप्रील", + "मे", + "जुन", + "जुलाई", + "अगष्ट", + "सेप्टेम्बर", + "अक्टोबर", + "नोवेम्बर", + "डिसेम्बर", + ] + month_abbreviations = [ + "", + "जन", + "फेब", + "मार्च", + "एप्रील", + "मे", + "जुन", + "जुलाई", + "अग", + "सेप", + "अक्ट", + "नोव", + "डिस", + ] + + day_names = [ + "", + "सोमवार", + "मंगलवार", + "बुधवार", + "बिहिवार", + "शुक्रवार", + "शनिवार", + "आइतवार", + ] + + day_abbreviations = ["", "सोम", "मंगल", "बुध", "बिहि", "शुक्र", "शनि", "आइत"] + + +class EstonianLocale(Locale): + names = 
["ee", "et"] + + past = "{0} tagasi" + future = "{0} pärast" + and_word = "ja" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Mapping[str, str]]] = { + "now": {"past": "just nüüd", "future": "just nüüd"}, + "second": {"past": "üks sekund", "future": "ühe sekundi"}, + "seconds": {"past": "{0} sekundit", "future": "{0} sekundi"}, + "minute": {"past": "üks minut", "future": "ühe minuti"}, + "minutes": {"past": "{0} minutit", "future": "{0} minuti"}, + "hour": {"past": "tund aega", "future": "tunni aja"}, + "hours": {"past": "{0} tundi", "future": "{0} tunni"}, + "day": {"past": "üks päev", "future": "ühe päeva"}, + "days": {"past": "{0} päeva", "future": "{0} päeva"}, + "month": {"past": "üks kuu", "future": "ühe kuu"}, + "months": {"past": "{0} kuud", "future": "{0} kuu"}, + "year": {"past": "üks aasta", "future": "ühe aasta"}, + "years": {"past": "{0} aastat", "future": "{0} aasta"}, + } + + month_names = [ + "", + "Jaanuar", + "Veebruar", + "Märts", + "Aprill", + "Mai", + "Juuni", + "Juuli", + "August", + "September", + "Oktoober", + "November", + "Detsember", + ] + month_abbreviations = [ + "", + "Jan", + "Veb", + "Mär", + "Apr", + "Mai", + "Jun", + "Jul", + "Aug", + "Sep", + "Okt", + "Nov", + "Dets", + ] + + day_names = [ + "", + "Esmaspäev", + "Teisipäev", + "Kolmapäev", + "Neljapäev", + "Reede", + "Laupäev", + "Pühapäev", + ] + day_abbreviations = ["", "Esm", "Teis", "Kolm", "Nelj", "Re", "Lau", "Püh"] + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + form = self.timeframes[timeframe] + if delta > 0: + _form = form["future"] + else: + _form = form["past"] + return _form.format(abs(delta)) + + +class LatvianLocale(Locale): + names = ["lv", "lv-lv"] + + past = "pirms {0}" + future = "pēc {0}" + and_word = "un" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "tagad", + "second": "sekundes", + "seconds": "{0} sekundēm", + "minute": "minūtes", + "minutes": "{0} minūtēm", + "hour": 
"stundas", + "hours": "{0} stundām", + "day": "dienas", + "days": "{0} dienām", + "week": "nedēļas", + "weeks": "{0} nedēļām", + "month": "mēneša", + "months": "{0} mēnešiem", + "year": "gada", + "years": "{0} gadiem", + } + + month_names = [ + "", + "janvāris", + "februāris", + "marts", + "aprīlis", + "maijs", + "jūnijs", + "jūlijs", + "augusts", + "septembris", + "oktobris", + "novembris", + "decembris", + ] + + month_abbreviations = [ + "", + "jan", + "feb", + "marts", + "apr", + "maijs", + "jūnijs", + "jūlijs", + "aug", + "sept", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "pirmdiena", + "otrdiena", + "trešdiena", + "ceturtdiena", + "piektdiena", + "sestdiena", + "svētdiena", + ] + + day_abbreviations = [ + "", + "pi", + "ot", + "tr", + "ce", + "pi", + "se", + "sv", + ] + + +class SwahiliLocale(Locale): + names = [ + "sw", + "sw-ke", + "sw-tz", + ] + + past = "{0} iliyopita" + future = "muda wa {0}" + and_word = "na" + + timeframes = { + "now": "sasa hivi", + "second": "sekunde", + "seconds": "sekunde {0}", + "minute": "dakika moja", + "minutes": "dakika {0}", + "hour": "saa moja", + "hours": "saa {0}", + "day": "siku moja", + "days": "siku {0}", + "week": "wiki moja", + "weeks": "wiki {0}", + "month": "mwezi moja", + "months": "miezi {0}", + "year": "mwaka moja", + "years": "miaka {0}", + } + + meridians = {"am": "asu", "pm": "mch", "AM": "ASU", "PM": "MCH"} + + month_names = [ + "", + "Januari", + "Februari", + "Machi", + "Aprili", + "Mei", + "Juni", + "Julai", + "Agosti", + "Septemba", + "Oktoba", + "Novemba", + "Desemba", + ] + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mac", + "Apr", + "Mei", + "Jun", + "Jul", + "Ago", + "Sep", + "Okt", + "Nov", + "Des", + ] + + day_names = [ + "", + "Jumatatu", + "Jumanne", + "Jumatano", + "Alhamisi", + "Ijumaa", + "Jumamosi", + "Jumapili", + ] + day_abbreviations = [ + "", + "Jumatatu", + "Jumanne", + "Jumatano", + "Alhamisi", + "Ijumaa", + "Jumamosi", + "Jumapili", + ] + + +class 
CroatianLocale(Locale): + names = ["hr", "hr-hr"] + + past = "prije {0}" + future = "za {0}" + and_word = "i" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "upravo sad", + "second": "sekundu", + "seconds": {"double": "{0} sekunde", "higher": "{0} sekundi"}, + "minute": "minutu", + "minutes": {"double": "{0} minute", "higher": "{0} minuta"}, + "hour": "sat", + "hours": {"double": "{0} sata", "higher": "{0} sati"}, + "day": "jedan dan", + "days": {"double": "{0} dana", "higher": "{0} dana"}, + "week": "tjedan", + "weeks": {"double": "{0} tjedna", "higher": "{0} tjedana"}, + "month": "mjesec", + "months": {"double": "{0} mjeseca", "higher": "{0} mjeseci"}, + "year": "godinu", + "years": {"double": "{0} godine", "higher": "{0} godina"}, + } + + month_names = [ + "", + "siječanj", + "veljača", + "ožujak", + "travanj", + "svibanj", + "lipanj", + "srpanj", + "kolovoz", + "rujan", + "listopad", + "studeni", + "prosinac", + ] + + month_abbreviations = [ + "", + "siječ", + "velj", + "ožuj", + "trav", + "svib", + "lip", + "srp", + "kol", + "ruj", + "list", + "stud", + "pros", + ] + + day_names = [ + "", + "ponedjeljak", + "utorak", + "srijeda", + "četvrtak", + "petak", + "subota", + "nedjelja", + ] + + day_abbreviations = [ + "", + "po", + "ut", + "sr", + "če", + "pe", + "su", + "ne", + ] + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + form = self.timeframes[timeframe] + delta = abs(delta) + if isinstance(form, Mapping): + if 1 < delta <= 4: + form = form["double"] + else: + form = form["higher"] + + return form.format(delta) + + +class LatinLocale(Locale): + names = ["la", "la-va"] + + past = "ante {0}" + future = "in {0}" + and_word = "et" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "nunc", + "second": "secundum", + "seconds": "{0} secundis", + "minute": "minutam", + "minutes": "{0} minutis", + "hour": "horam", + "hours": "{0} horas", + 
"day": "diem", + "days": "{0} dies", + "week": "hebdomadem", + "weeks": "{0} hebdomades", + "month": "mensem", + "months": "{0} mensis", + "year": "annum", + "years": "{0} annos", + } + + month_names = [ + "", + "Ianuarius", + "Februarius", + "Martius", + "Aprilis", + "Maius", + "Iunius", + "Iulius", + "Augustus", + "September", + "October", + "November", + "December", + ] + + month_abbreviations = [ + "", + "Ian", + "Febr", + "Mart", + "Apr", + "Mai", + "Iun", + "Iul", + "Aug", + "Sept", + "Oct", + "Nov", + "Dec", + ] + + day_names = [ + "", + "dies Lunae", + "dies Martis", + "dies Mercurii", + "dies Iovis", + "dies Veneris", + "dies Saturni", + "dies Solis", + ] + + day_abbreviations = [ + "", + "dies Lunae", + "dies Martis", + "dies Mercurii", + "dies Iovis", + "dies Veneris", + "dies Saturni", + "dies Solis", + ] + + +class LithuanianLocale(Locale): + names = ["lt", "lt-lt"] + + past = "prieš {0}" + future = "po {0}" + and_word = "ir" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "dabar", + "second": "sekundės", + "seconds": "{0} sekundžių", + "minute": "minutės", + "minutes": "{0} minučių", + "hour": "valandos", + "hours": "{0} valandų", + "day": "dieną", + "days": "{0} dienų", + "week": "savaitės", + "weeks": "{0} savaičių", + "month": "mėnesio", + "months": "{0} mėnesių", + "year": "metų", + "years": "{0} metų", + } + + month_names = [ + "", + "sausis", + "vasaris", + "kovas", + "balandis", + "gegužė", + "birželis", + "liepa", + "rugpjūtis", + "rugsėjis", + "spalis", + "lapkritis", + "gruodis", + ] + + month_abbreviations = [ + "", + "saus", + "vas", + "kovas", + "bal", + "geg", + "birž", + "liepa", + "rugp", + "rugs", + "spalis", + "lapkr", + "gr", + ] + + day_names = [ + "", + "pirmadienis", + "antradienis", + "trečiadienis", + "ketvirtadienis", + "penktadienis", + "šeštadienis", + "sekmadienis", + ] + + day_abbreviations = [ + "", + "pi", + "an", + "tr", + "ke", + "pe", + "še", + "se", + ] + + +class 
MalayLocale(Locale): + names = ["ms", "ms-my", "ms-bn"] + + past = "{0} yang lalu" + future = "dalam {0}" + and_word = "dan" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "sekarang", + "second": "saat", + "seconds": "{0} saat", + "minute": "minit", + "minutes": "{0} minit", + "hour": "jam", + "hours": "{0} jam", + "day": "hari", + "days": "{0} hari", + "week": "minggu", + "weeks": "{0} minggu", + "month": "bulan", + "months": "{0} bulan", + "year": "tahun", + "years": "{0} tahun", + } + + month_names = [ + "", + "Januari", + "Februari", + "Mac", + "April", + "Mei", + "Jun", + "Julai", + "Ogos", + "September", + "Oktober", + "November", + "Disember", + ] + + month_abbreviations = [ + "", + "Jan.", + "Feb.", + "Mac", + "Apr.", + "Mei", + "Jun", + "Julai", + "Og.", + "Sept.", + "Okt.", + "Nov.", + "Dis.", + ] + + day_names = [ + "", + "Isnin", + "Selasa", + "Rabu", + "Khamis", + "Jumaat", + "Sabtu", + "Ahad", + ] + + day_abbreviations = [ + "", + "Isnin", + "Selasa", + "Rabu", + "Khamis", + "Jumaat", + "Sabtu", + "Ahad", + ] + + +class MalteseLocale(Locale): + names = ["mt", "mt-mt"] + + past = "{0} ilu" + future = "fi {0}" + and_word = "u" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "issa", + "second": "sekonda", + "seconds": "{0} sekondi", + "minute": "minuta", + "minutes": "{0} minuti", + "hour": "siegħa", + "hours": {"dual": "{0} sagħtejn", "plural": "{0} sigħat"}, + "day": "jum", + "days": {"dual": "{0} jumejn", "plural": "{0} ijiem"}, + "week": "ġimgħa", + "weeks": {"dual": "{0} ġimagħtejn", "plural": "{0} ġimgħat"}, + "month": "xahar", + "months": {"dual": "{0} xahrejn", "plural": "{0} xhur"}, + "year": "sena", + "years": {"dual": "{0} sentejn", "plural": "{0} snin"}, + } + + month_names = [ + "", + "Jannar", + "Frar", + "Marzu", + "April", + "Mejju", + "Ġunju", + "Lulju", + "Awwissu", + "Settembru", + "Ottubru", + "Novembru", + "Diċembru", + ] + + 
month_abbreviations = [ + "", + "Jan", + "Fr", + "Mar", + "Apr", + "Mejju", + "Ġun", + "Lul", + "Aw", + "Sett", + "Ott", + "Nov", + "Diċ", + ] + + day_names = [ + "", + "It-Tnejn", + "It-Tlieta", + "L-Erbgħa", + "Il-Ħamis", + "Il-Ġimgħa", + "Is-Sibt", + "Il-Ħadd", + ] + + day_abbreviations = [ + "", + "T", + "TL", + "E", + "Ħ", + "Ġ", + "S", + "Ħ", + ] + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + form = self.timeframes[timeframe] + delta = abs(delta) + if isinstance(form, Mapping): + if delta == 2: + form = form["dual"] + else: + form = form["plural"] + + return form.format(delta) + + +class SamiLocale(Locale): + names = ["se", "se-fi", "se-no", "se-se"] + + past = "{0} dassái" + future = "{0} " # NOTE: couldn't find preposition for Sami here, none needed? + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "dál", + "second": "sekunda", + "seconds": "{0} sekundda", + "minute": "minuhta", + "minutes": "{0} minuhta", + "hour": "diimmu", + "hours": "{0} diimmu", + "day": "beaivvi", + "days": "{0} beaivvi", + "week": "vahku", + "weeks": "{0} vahku", + "month": "mánu", + "months": "{0} mánu", + "year": "jagi", + "years": "{0} jagi", + } + + month_names = [ + "", + "Ođđajagimánnu", + "Guovvamánnu", + "Njukčamánnu", + "Cuoŋománnu", + "Miessemánnu", + "Geassemánnu", + "Suoidnemánnu", + "Borgemánnu", + "Čakčamánnu", + "Golggotmánnu", + "Skábmamánnu", + "Juovlamánnu", + ] + + month_abbreviations = [ + "", + "Ođđajagimánnu", + "Guovvamánnu", + "Njukčamánnu", + "Cuoŋománnu", + "Miessemánnu", + "Geassemánnu", + "Suoidnemánnu", + "Borgemánnu", + "Čakčamánnu", + "Golggotmánnu", + "Skábmamánnu", + "Juovlamánnu", + ] + + day_names = [ + "", + "Mánnodat", + "Disdat", + "Gaskavahkku", + "Duorastat", + "Bearjadat", + "Lávvordat", + "Sotnabeaivi", + ] + + day_abbreviations = [ + "", + "Mánnodat", + "Disdat", + "Gaskavahkku", + "Duorastat", + "Bearjadat", + "Lávvordat", + "Sotnabeaivi", + ] + + +class 
OdiaLocale(Locale): + names = ["or", "or-in"] + + past = "{0} ପୂର୍ବେ" + future = "{0} ପରେ" + + timeframes = { + "now": "ବର୍ତ୍ତମାନ", + "second": "ଏକ ସେକେଣ୍ଡ", + "seconds": "{0} ସେକେଣ୍ଡ", + "minute": "ଏକ ମିନଟ", + "minutes": "{0} ମିନଟ", + "hour": "ଏକ ଘଣ୍ଟା", + "hours": "{0} ଘଣ୍ଟା", + "day": "ଏକ ଦିନ", + "days": "{0} ଦିନ", + "month": "ଏକ ମାସ", + "months": "{0} ମାସ ", + "year": "ଏକ ବର୍ଷ", + "years": "{0} ବର୍ଷ", + } + + meridians = {"am": "ପୂର୍ବାହ୍ନ", "pm": "ଅପରାହ୍ନ", "AM": "ପୂର୍ବାହ୍ନ", "PM": "ଅପରାହ୍ନ"} + + month_names = [ + "", + "ଜାନୁଆରୀ", + "ଫେବୃଆରୀ", + "ମାର୍ଚ୍ଚ୍", + "ଅପ୍ରେଲ", + "ମଇ", + "ଜୁନ୍", + "ଜୁଲାଇ", + "ଅଗଷ୍ଟ", + "ସେପ୍ଟେମ୍ବର", + "ଅକ୍ଟୋବର୍", + "ନଭେମ୍ବର୍", + "ଡିସେମ୍ବର୍", + ] + month_abbreviations = [ + "", + "ଜାନୁ", + "ଫେବୃ", + "ମାର୍ଚ୍ଚ୍", + "ଅପ୍ରେ", + "ମଇ", + "ଜୁନ୍", + "ଜୁଲା", + "ଅଗ", + "ସେପ୍ଟେ", + "ଅକ୍ଟୋ", + "ନଭେ", + "ଡିସେ", + ] + + day_names = [ + "", + "ସୋମବାର", + "ମଙ୍ଗଳବାର", + "ବୁଧବାର", + "ଗୁରୁବାର", + "ଶୁକ୍ରବାର", + "ଶନିବାର", + "ରବିବାର", + ] + day_abbreviations = [ + "", + "ସୋମ", + "ମଙ୍ଗଳ", + "ବୁଧ", + "ଗୁରୁ", + "ଶୁକ୍ର", + "ଶନି", + "ରବି", + ] + + def _ordinal_number(self, n: int) -> str: + if n > 10 or n == 0: + return f"{n}ତମ" + if n in [1, 5, 7, 8, 9, 10]: + return f"{n}ମ" + if n in [2, 3]: + return f"{n}ୟ" + if n == 4: + return f"{n}ର୍ଥ" + if n == 6: + return f"{n}ଷ୍ଠ" + return "" + + +class SerbianLocale(Locale): + names = ["sr", "sr-rs", "sr-sp"] + + past = "pre {0}" + future = "za {0}" + and_word = "i" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { + "now": "sada", + "second": "sekundu", + "seconds": {"double": "{0} sekunde", "higher": "{0} sekundi"}, + "minute": "minutu", + "minutes": {"double": "{0} minute", "higher": "{0} minuta"}, + "hour": "sat", + "hours": {"double": "{0} sata", "higher": "{0} sati"}, + "day": "dan", + "days": {"double": "{0} dana", "higher": "{0} dana"}, + "week": "nedelju", + "weeks": {"double": "{0} nedelje", "higher": "{0} nedelja"}, + "month": "mesec", + "months": {"double": "{0} meseca", 
"higher": "{0} meseci"}, + "year": "godinu", + "years": {"double": "{0} godine", "higher": "{0} godina"}, + } + + month_names = [ + "", + "januar", # јануар + "februar", # фебруар + "mart", # март + "april", # април + "maj", # мај + "jun", # јун + "jul", # јул + "avgust", # август + "septembar", # септембар + "oktobar", # октобар + "novembar", # новембар + "decembar", # децембар + ] + + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "maj", + "jun", + "jul", + "avg", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "ponedeljak", # понедељак + "utorak", # уторак + "sreda", # среда + "četvrtak", # четвртак + "petak", # петак + "subota", # субота + "nedelja", # недеља + ] + + day_abbreviations = [ + "", + "po", # по + "ut", # ут + "sr", # ср + "če", # че + "pe", # пе + "su", # су + "ne", # не + ] + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + form = self.timeframes[timeframe] + delta = abs(delta) + if isinstance(form, Mapping): + if 1 < delta <= 4: + form = form["double"] + else: + form = form["higher"] + + return form.format(delta) + + +class LuxembourgishLocale(Locale): + names = ["lb", "lb-lu"] + + past = "virun {0}" + future = "an {0}" + and_word = "an" + + timeframes = { + "now": "just elo", + "second": "enger Sekonn", + "seconds": "{0} Sekonnen", + "minute": "enger Minutt", + "minutes": "{0} Minutten", + "hour": "enger Stonn", + "hours": "{0} Stonnen", + "day": "engem Dag", + "days": "{0} Deeg", + "week": "enger Woch", + "weeks": "{0} Wochen", + "month": "engem Mount", + "months": "{0} Méint", + "year": "engem Joer", + "years": "{0} Jahren", + } + + timeframes_only_distance = timeframes.copy() + timeframes_only_distance["second"] = "eng Sekonn" + timeframes_only_distance["minute"] = "eng Minutt" + timeframes_only_distance["hour"] = "eng Stonn" + timeframes_only_distance["day"] = "een Dag" + timeframes_only_distance["days"] = "{0} Deeg" + timeframes_only_distance["week"] = "eng Woch" + 
timeframes_only_distance["month"] = "ee Mount" + timeframes_only_distance["months"] = "{0} Méint" + timeframes_only_distance["year"] = "ee Joer" + timeframes_only_distance["years"] = "{0} Joer" + + month_names = [ + "", + "Januar", + "Februar", + "Mäerz", + "Abrëll", + "Mee", + "Juni", + "Juli", + "August", + "September", + "Oktouber", + "November", + "Dezember", + ] + + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mäe", + "Abr", + "Mee", + "Jun", + "Jul", + "Aug", + "Sep", + "Okt", + "Nov", + "Dez", + ] + + day_names = [ + "", + "Méindeg", + "Dënschdeg", + "Mëttwoch", + "Donneschdeg", + "Freideg", + "Samschdeg", + "Sonndeg", + ] + + day_abbreviations = ["", "Méi", "Dën", "Mët", "Don", "Fre", "Sam", "Son"] + + def _ordinal_number(self, n: int) -> str: + return f"{n}." + + def describe( + self, + timeframe: TimeFrameLiteral, + delta: Union[int, float] = 0, + only_distance: bool = False, + ) -> str: + if not only_distance: + return super().describe(timeframe, delta, only_distance) + + # Luxembourgish uses a different case without 'in' or 'ago' + humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta))) + + return humanized + + +class ZuluLocale(Locale): + names = ["zu", "zu-za"] + + past = "{0} edlule" + future = "{0} " + and_word = "futhi" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[Mapping[str, str], str]]] = { + "now": "manje", + "second": {"past": "umzuzwana", "future": "ngomzuzwana"}, + "seconds": {"past": "{0} imizuzwana", "future": "{0} ngemizuzwana"}, + "minute": {"past": "umzuzu", "future": "ngomzuzu"}, + "minutes": {"past": "{0} imizuzu", "future": "{0} ngemizuzu"}, + "hour": {"past": "ihora", "future": "ngehora"}, + "hours": {"past": "{0} amahora", "future": "{0} emahoreni"}, + "day": {"past": "usuku", "future": "ngosuku"}, + "days": {"past": "{0} izinsuku", "future": "{0} ezinsukwini"}, + "week": {"past": "isonto", "future": "ngesonto"}, + "weeks": {"past": "{0} amasonto", "future": "{0} emasontweni"}, + "month": 
{"past": "inyanga", "future": "ngenyanga"}, + "months": {"past": "{0} izinyanga", "future": "{0} ezinyangeni"}, + "year": {"past": "unyaka", "future": "ngonyak"}, + "years": {"past": "{0} iminyaka", "future": "{0} eminyakeni"}, + } + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + """Zulu aware time frame format function, takes into account + the differences between past and future forms.""" + abs_delta = abs(delta) + form = self.timeframes[timeframe] + + if isinstance(form, str): + return form.format(abs_delta) + + if delta > 0: + key = "future" + else: + key = "past" + form = form[key] + + return form.format(abs_delta) + + month_names = [ + "", + "uMasingane", + "uNhlolanja", + "uNdasa", + "UMbasa", + "UNhlaba", + "UNhlangulana", + "uNtulikazi", + "UNcwaba", + "uMandulo", + "uMfumfu", + "uLwezi", + "uZibandlela", + ] + + month_abbreviations = [ + "", + "uMasingane", + "uNhlolanja", + "uNdasa", + "UMbasa", + "UNhlaba", + "UNhlangulana", + "uNtulikazi", + "UNcwaba", + "uMandulo", + "uMfumfu", + "uLwezi", + "uZibandlela", + ] + + day_names = [ + "", + "uMsombuluko", + "uLwesibili", + "uLwesithathu", + "uLwesine", + "uLwesihlanu", + "uMgqibelo", + "iSonto", + ] + + day_abbreviations = [ + "", + "uMsombuluko", + "uLwesibili", + "uLwesithathu", + "uLwesine", + "uLwesihlanu", + "uMgqibelo", + "iSonto", + ] + + +class TamilLocale(Locale): + names = ["ta", "ta-in", "ta-lk"] + + past = "{0} நேரத்திற்கு முன்பு" + future = "இல் {0}" + + timeframes = { + "now": "இப்போது", + "second": "ஒரு இரண்டாவது", + "seconds": "{0} விநாடிகள்", + "minute": "ஒரு நிமிடம்", + "minutes": "{0} நிமிடங்கள்", + "hour": "ஒரு மணி", + "hours": "{0} மணிநேரம்", + "day": "ஒரு நாள்", + "days": "{0} நாட்கள்", + "week": "ஒரு வாரம்", + "weeks": "{0} வாரங்கள்", + "month": "ஒரு மாதம்", + "months": "{0} மாதங்கள்", + "year": "ஒரு ஆண்டு", + "years": "{0} ஆண்டுகள்", + } + + month_names = [ + "", + "சித்திரை", + "வைகாசி", + "ஆனி", + "ஆடி", + "ஆவணி", + "புரட்டாசி", + "ஐப்பசி", + 
"கார்த்திகை", + "மார்கழி", + "தை", + "மாசி", + "பங்குனி", + ] + + month_abbreviations = [ + "", + "ஜன", + "பிப்", + "மார்", + "ஏப்", + "மே", + "ஜூன்", + "ஜூலை", + "ஆக", + "செப்", + "அக்", + "நவ", + "டிச", + ] + + day_names = [ + "", + "திங்கட்கிழமை", + "செவ்வாய்க்கிழமை", + "புதன்கிழமை", + "வியாழக்கிழமை", + "வெள்ளிக்கிழமை", + "சனிக்கிழமை", + "ஞாயிற்றுக்கிழமை", + ] + + day_abbreviations = [ + "", + "திங்கட்", + "செவ்வாய்", + "புதன்", + "வியாழன்", + "வெள்ளி", + "சனி", + "ஞாயிறு", + ] + + def _ordinal_number(self, n: int) -> str: + if n == 1: + return f"{n}வது" + elif n >= 0: + return f"{n}ஆம்" + else: + return "" + + +class AlbanianLocale(Locale): + names = ["sq", "sq-al"] + + past = "{0} më parë" + future = "në {0}" + and_word = "dhe" + + timeframes = { + "now": "tani", + "second": "sekondë", + "seconds": "{0} sekonda", + "minute": "minutë", + "minutes": "{0} minuta", + "hour": "orë", + "hours": "{0} orë", + "day": "ditë", + "days": "{0} ditë", + "week": "javë", + "weeks": "{0} javë", + "month": "muaj", + "months": "{0} muaj", + "year": "vit", + "years": "{0} vjet", + } + + month_names = [ + "", + "janar", + "shkurt", + "mars", + "prill", + "maj", + "qershor", + "korrik", + "gusht", + "shtator", + "tetor", + "nëntor", + "dhjetor", + ] + + month_abbreviations = [ + "", + "jan", + "shk", + "mar", + "pri", + "maj", + "qer", + "korr", + "gush", + "sht", + "tet", + "nën", + "dhj", + ] + + day_names = [ + "", + "e hënë", + "e martë", + "e mërkurë", + "e enjte", + "e premte", + "e shtunë", + "e diel", + ] + + day_abbreviations = [ + "", + "hën", + "mar", + "mër", + "enj", + "pre", + "sht", + "die", + ] + + +class GeorgianLocale(Locale): + names = ["ka", "ka-ge"] + + past = "{0} წინ" # ts’in + future = "{0} შემდეგ" # shemdeg + and_word = "და" # da + + timeframes = { + "now": "ახლა", # akhla + # When a cardinal qualifies a noun, it stands in the singular + "second": "წამის", # ts’amis + "seconds": "{0} წამის", + "minute": "წუთის", # ts’utis + "minutes": "{0} წუთის", + "hour": 
"საათის", # saatis + "hours": "{0} საათის", + "day": "დღის", # dghis + "days": "{0} დღის", + "week": "კვირის", # k’viris + "weeks": "{0} კვირის", + "month": "თვის", # tvis + "months": "{0} თვის", + "year": "წლის", # ts’lis + "years": "{0} წლის", + } + + month_names = [ + # modern month names + "", + "იანვარი", # Ianvari + "თებერვალი", # Tebervali + "მარტი", # Mart'i + "აპრილი", # Ap'rili + "მაისი", # Maisi + "ივნისი", # Ivnisi + "ივლისი", # Ivlisi + "აგვისტო", # Agvist'o + "სექტემბერი", # Sekt'emberi + "ოქტომბერი", # Okt'omberi + "ნოემბერი", # Noemberi + "დეკემბერი", # Dek'emberi + ] + + month_abbreviations = [ + # no abbr. found yet + "", + "იანვარი", # Ianvari + "თებერვალი", # Tebervali + "მარტი", # Mart'i + "აპრილი", # Ap'rili + "მაისი", # Maisi + "ივნისი", # Ivnisi + "ივლისი", # Ivlisi + "აგვისტო", # Agvist'o + "სექტემბერი", # Sekt'emberi + "ოქტომბერი", # Okt'omberi + "ნოემბერი", # Noemberi + "დეკემბერი", # Dek'emberi + ] + + day_names = [ + "", + "ორშაბათი", # orshabati + "სამშაბათი", # samshabati + "ოთხშაბათი", # otkhshabati + "ხუთშაბათი", # khutshabati + "პარასკევი", # p’arask’evi + "შაბათი", # shabati + # "k’vira" also serves as week; to avoid confusion "k’vira-dge" can be used for Sunday + "კვირა", # k’vira + ] + + day_abbreviations = [ + "", + "ორშაბათი", # orshabati + "სამშაბათი", # samshabati + "ოთხშაბათი", # otkhshabati + "ხუთშაბათი", # khutshabati + "პარასკევი", # p’arask’evi + "შაბათი", # shabati + "კვირა", # k’vira + ] + + +class SinhalaLocale(Locale): + names = ["si", "si-lk"] + + past = "{0}ට පෙර" + future = "{0}" + and_word = "සහ" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[Mapping[str, str], str]]] = { + "now": "දැන්", + "second": { + "past": "තත්පරයක", + "future": "තත්පරයකින්", + }, # ක් is the article + "seconds": { + "past": "තත්පර {0} ක", + "future": "තත්පර {0} කින්", + }, + "minute": { + "past": "විනාඩියක", + "future": "විනාඩියකින්", + }, + "minutes": { + "past": "විනාඩි {0} ක", + "future": "මිනිත්තු {0} කින්", + }, + "hour": 
{"past": "පැයක", "future": "පැයකින්"}, + "hours": { + "past": "පැය {0} ක", + "future": "පැය {0} කින්", + }, + "day": {"past": "දිනක", "future": "දිනකට"}, + "days": { + "past": "දින {0} ක", + "future": "දින {0} කින්", + }, + "week": {"past": "සතියක", "future": "සතියකින්"}, + "weeks": { + "past": "සති {0} ක", + "future": "සති {0} කින්", + }, + "month": {"past": "මාසයක", "future": "එය මාසය තුළ"}, + "months": { + "past": "මාස {0} ක", + "future": "මාස {0} කින්", + }, + "year": {"past": "වසරක", "future": "වසරක් තුළ"}, + "years": { + "past": "අවුරුදු {0} ක", + "future": "අවුරුදු {0} තුළ", + }, + } + # Sinhala: the general format to describe timeframe is different from past and future, + # so we do not copy the original timeframes dictionary + timeframes_only_distance = {} + timeframes_only_distance["second"] = "තත්පරයක්" + timeframes_only_distance["seconds"] = "තත්පර {0}" + timeframes_only_distance["minute"] = "මිනිත්තුවක්" + timeframes_only_distance["minutes"] = "විනාඩි {0}" + timeframes_only_distance["hour"] = "පැයක්" + timeframes_only_distance["hours"] = "පැය {0}" + timeframes_only_distance["day"] = "දවසක්" + timeframes_only_distance["days"] = "දවස් {0}" + timeframes_only_distance["week"] = "සතියක්" + timeframes_only_distance["weeks"] = "සති {0}" + timeframes_only_distance["month"] = "මාසයක්" + timeframes_only_distance["months"] = "මාස {0}" + timeframes_only_distance["year"] = "අවුරුද්දක්" + timeframes_only_distance["years"] = "අවුරුදු {0}" + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + """ + Sinhala awares time frame format function, takes into account + the differences between general, past, and future forms (three different suffixes). 
+ """ + abs_delta = abs(delta) + form = self.timeframes[timeframe] + + if isinstance(form, str): + return form.format(abs_delta) + + if delta > 0: + key = "future" + else: + key = "past" + form = form[key] + + return form.format(abs_delta) + + def describe( + self, + timeframe: TimeFrameLiteral, + delta: Union[float, int] = 1, # key is always future when only_distance=False + only_distance: bool = False, + ) -> str: + """Describes a delta within a timeframe in plain language. + + :param timeframe: a string representing a timeframe. + :param delta: a quantity representing a delta in a timeframe. + :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords + """ + + if not only_distance: + return super().describe(timeframe, delta, only_distance) + # Sinhala uses a different case without 'in' or 'ago' + humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta))) + + return humanized + + month_names = [ + "", + "ජනවාරි", + "පෙබරවාරි", + "මාර්තු", + "අප්‍රේල්", + "මැයි", + "ජූනි", + "ජූලි", + "අගෝස්තු", + "සැප්තැම්බර්", + "ඔක්තෝබර්", + "නොවැම්බර්", + "දෙසැම්බර්", + ] + + month_abbreviations = [ + "", + "ජන", + "පෙබ", + "මාර්", + "අප්‍රේ", + "මැයි", + "ජුනි", + "ජූලි", + "අගෝ", + "සැප්", + "ඔක්", + "නොවැ", + "දෙසැ", + ] + + day_names = [ + "", + "සදුදා", + "අඟහරැවදා", + "බදාදා", + "බ්‍රහස්‍පතින්‍දා", + "සිකුරාදා", + "සෙනසුරාදා", + "ඉරිදා", + ] + + day_abbreviations = [ + "", + "සදුද", + "බදා", + "බදා", + "සිකු", + "සෙන", + "අ", + "ඉරිදා", + ] + + +class UrduLocale(Locale): + names = ["ur", "ur-pk"] + + past = "پہلے {0}" + future = "میں {0}" + and_word = "اور" + + timeframes = { + "now": "ابھی", + "second": "ایک سیکنڈ", + "seconds": "{0} سیکنڈ", + "minute": "ایک منٹ", + "minutes": "{0} منٹ", + "hour": "ایک گھنٹے", + "hours": "{0} گھنٹے", + "day": "ایک دن", + "days": "{0} دن", + "week": "ایک ہفتے", + "weeks": "{0} ہفتے", + "month": "ایک مہینہ", + "months": "{0} ماہ", + "year": "ایک سال", + "years": "{0} سال", + } + + 
month_names = [ + "", + "جنوری", + "فروری", + "مارچ", + "اپریل", + "مئی", + "جون", + "جولائی", + "اگست", + "ستمبر", + "اکتوبر", + "نومبر", + "دسمبر", + ] + + month_abbreviations = [ + "", + "جنوری", + "فروری", + "مارچ", + "اپریل", + "مئی", + "جون", + "جولائی", + "اگست", + "ستمبر", + "اکتوبر", + "نومبر", + "دسمبر", + ] + + day_names = [ + "", + "سوموار", + "منگل", + "بدھ", + "جمعرات", + "جمعہ", + "ہفتہ", + "اتوار", + ] + + day_abbreviations = [ + "", + "سوموار", + "منگل", + "بدھ", + "جمعرات", + "جمعہ", + "ہفتہ", + "اتوار", + ] + + +class KazakhLocale(Locale): + names = ["kk", "kk-kz"] + + past = "{0} бұрын" + future = "{0} кейін" + timeframes = { + "now": "қазір", + "second": "бір секунд", + "seconds": "{0} секунд", + "minute": "бір минут", + "minutes": "{0} минут", + "hour": "бір сағат", + "hours": "{0} сағат", + "day": "бір күн", + "days": "{0} күн", + "week": "бір апта", + "weeks": "{0} апта", + "month": "бір ай", + "months": "{0} ай", + "year": "бір жыл", + "years": "{0} жыл", + } + + month_names = [ + "", + "Қаңтар", + "Ақпан", + "Наурыз", + "Сәуір", + "Мамыр", + "Маусым", + "Шілде", + "Тамыз", + "Қыркүйек", + "Қазан", + "Қараша", + "Желтоқсан", + ] + month_abbreviations = [ + "", + "Қан", + "Ақп", + "Нау", + "Сәу", + "Мам", + "Мау", + "Шіл", + "Там", + "Қыр", + "Қаз", + "Қар", + "Жел", + ] + + day_names = [ + "", + "Дүйсембі", + "Сейсенбі", + "Сәрсенбі", + "Бейсенбі", + "Жұма", + "Сенбі", + "Жексенбі", + ] + day_abbreviations = ["", "Дс", "Сс", "Ср", "Бс", "Жм", "Сб", "Жс"] + + +class AmharicLocale(Locale): + names = ["am", "am-et"] + + past = "{0} በፊት" + future = "{0} ውስጥ" + and_word = "እና" + + timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[Mapping[str, str], str]]] = { + "now": "አሁን", + "second": { + "past": "ከአንድ ሰከንድ", + "future": "በአንድ ሰከንድ", + }, + "seconds": { + "past": "ከ {0} ሰከንድ", + "future": "በ {0} ሰከንድ", + }, + "minute": { + "past": "ከአንድ ደቂቃ", + "future": "በአንድ ደቂቃ", + }, + "minutes": { + "past": "ከ {0} ደቂቃዎች", + "future": "በ {0} ደቂቃዎች", + 
}, + "hour": { + "past": "ከአንድ ሰዓት", + "future": "በአንድ ሰዓት", + }, + "hours": { + "past": "ከ {0} ሰዓታት", + "future": "በ {0} ሰከንድ", + }, + "day": { + "past": "ከአንድ ቀን", + "future": "በአንድ ቀን", + }, + "days": { + "past": "ከ {0} ቀናት", + "future": "በ {0} ቀናት", + }, + "week": { + "past": "ከአንድ ሳምንት", + "future": "በአንድ ሳምንት", + }, + "weeks": { + "past": "ከ {0} ሳምንታት", + "future": "በ {0} ሳምንታት", + }, + "month": { + "past": "ከአንድ ወር", + "future": "በአንድ ወር", + }, + "months": { + "past": "ከ {0} ወር", + "future": "በ {0} ወራት", + }, + "year": { + "past": "ከአንድ አመት", + "future": "በአንድ አመት", + }, + "years": { + "past": "ከ {0} ዓመታት", + "future": "በ {0} ዓመታት", + }, + } + # Amharic: the general format to describe timeframe is different from past and future, + # so we do not copy the original timeframes dictionary + timeframes_only_distance = { + "second": "አንድ ሰከንድ", + "seconds": "{0} ሰከንድ", + "minute": "አንድ ደቂቃ", + "minutes": "{0} ደቂቃዎች", + "hour": "አንድ ሰዓት", + "hours": "{0} ሰዓት", + "day": "አንድ ቀን", + "days": "{0} ቀናት", + "week": "አንድ ሳምንት", + "weeks": "{0} ሳምንት", + "month": "አንድ ወር", + "months": "{0} ወራት", + "year": "አንድ አመት", + "years": "{0} ዓመታት", + } + + month_names = [ + "", + "ጃንዩወሪ", + "ፌብሩወሪ", + "ማርች", + "ኤፕሪል", + "ሜይ", + "ጁን", + "ጁላይ", + "ኦገስት", + "ሴፕቴምበር", + "ኦክቶበር", + "ኖቬምበር", + "ዲሴምበር", + ] + + month_abbreviations = [ + "", + "ጃንዩ", + "ፌብሩ", + "ማርች", + "ኤፕሪ", + "ሜይ", + "ጁን", + "ጁላይ", + "ኦገስ", + "ሴፕቴ", + "ኦክቶ", + "ኖቬም", + "ዲሴም", + ] + + day_names = [ + "", + "ሰኞ", + "ማክሰኞ", + "ረቡዕ", + "ሐሙስ", + "ዓርብ", + "ቅዳሜ", + "እሑድ", + ] + day_abbreviations = ["", "እ", "ሰ", "ማ", "ረ", "ሐ", "ዓ", "ቅ"] + + def _ordinal_number(self, n: int) -> str: + return f"{n}ኛ" + + def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str: + """ + Amharic awares time frame format function, takes into account + the differences between general, past, and future forms (three different suffixes). 
+ """ + abs_delta = abs(delta) + form = self.timeframes[timeframe] + + if isinstance(form, str): + return form.format(abs_delta) + + if delta > 0: + key = "future" + else: + key = "past" + form = form[key] + + return form.format(abs_delta) + + def describe( + self, + timeframe: TimeFrameLiteral, + delta: Union[float, int] = 1, # key is always future when only_distance=False + only_distance: bool = False, + ) -> str: + """Describes a delta within a timeframe in plain language. + + :param timeframe: a string representing a timeframe. + :param delta: a quantity representing a delta in a timeframe. + :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords + """ + + if not only_distance: + return super().describe(timeframe, delta, only_distance) + humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta))) + + return humanized + + +class ArmenianLocale(Locale): + names = ["hy", "hy-am"] + past = "{0} առաջ" + future = "{0}ից" + and_word = "Եվ" # Yev + + timeframes = { + "now": "հիմա", + "second": "վայրկյան", + "seconds": "{0} վայրկյան", + "minute": "րոպե", + "minutes": "{0} րոպե", + "hour": "ժամ", + "hours": "{0} ժամ", + "day": "օր", + "days": "{0} օր", + "month": "ամիս", + "months": "{0} ամիս", + "year": "տարին", + "years": "{0} տարին", + "week": "շաբաթ", + "weeks": "{0} շաբաթ", + } + + meridians = { + "am": "Ամ", + "pm": "պ.մ.", + "AM": "Ամ", + "PM": "պ.մ.", + } + + month_names = [ + "", + "հունվար", + "փետրվար", + "մարտ", + "ապրիլ", + "մայիս", + "հունիս", + "հուլիս", + "օգոստոս", + "սեպտեմբեր", + "հոկտեմբեր", + "նոյեմբեր", + "դեկտեմբեր", + ] + + month_abbreviations = [ + "", + "հունվար", + "փետրվար", + "մարտ", + "ապրիլ", + "մայիս", + "հունիս", + "հուլիս", + "օգոստոս", + "սեպտեմբեր", + "հոկտեմբեր", + "նոյեմբեր", + "դեկտեմբեր", + ] + + day_names = [ + "", + "երկուշաբթի", + "երեքշաբթի", + "չորեքշաբթի", + "հինգշաբթի", + "ուրբաթ", + "շաբաթ", + "կիրակի", + ] + + day_abbreviations = [ + "", + "երկ.", + "երեք.", + 
"չորեք.", + "հինգ.", + "ուրբ.", + "շաբ.", + "կիր.", + ] + + +class UzbekLocale(Locale): + names = ["uz", "uz-uz"] + past = "{0}dan avval" + future = "{0}dan keyin" + timeframes = { + "now": "hozir", + "second": "bir soniya", + "seconds": "{0} soniya", + "minute": "bir daqiqa", + "minutes": "{0} daqiqa", + "hour": "bir soat", + "hours": "{0} soat", + "day": "bir kun", + "days": "{0} kun", + "week": "bir hafta", + "weeks": "{0} hafta", + "month": "bir oy", + "months": "{0} oy", + "year": "bir yil", + "years": "{0} yil", + } + + month_names = [ + "", + "Yanvar", + "Fevral", + "Mart", + "Aprel", + "May", + "Iyun", + "Iyul", + "Avgust", + "Sentyabr", + "Oktyabr", + "Noyabr", + "Dekabr", + ] + + month_abbreviations = [ + "", + "Yan", + "Fev", + "Mar", + "Apr", + "May", + "Iyn", + "Iyl", + "Avg", + "Sen", + "Okt", + "Noy", + "Dek", + ] + + day_names = [ + "", + "Dushanba", + "Seshanba", + "Chorshanba", + "Payshanba", + "Juma", + "Shanba", + "Yakshanba", + ] + + day_abbreviations = ["", "Dush", "Sesh", "Chor", "Pay", "Jum", "Shan", "Yak"] diff --git a/third_party/python/arrow/arrow/parser.py b/third_party/python/arrow/arrow/parser.py new file mode 100644 index 0000000000000..e95d78b0d6ebb --- /dev/null +++ b/third_party/python/arrow/arrow/parser.py @@ -0,0 +1,779 @@ +"""Provides the :class:`Arrow ` class, a better way to parse datetime strings.""" + +import re +import sys +from datetime import datetime, timedelta +from datetime import tzinfo as dt_tzinfo +from functools import lru_cache +from typing import ( + Any, + ClassVar, + Dict, + Iterable, + List, + Match, + Optional, + Pattern, + SupportsFloat, + SupportsInt, + Tuple, + Union, + cast, + overload, +) + +from dateutil import tz + +from arrow import locales +from arrow.constants import DEFAULT_LOCALE +from arrow.util import next_weekday, normalize_timestamp + +if sys.version_info < (3, 8): # pragma: no cover + from typing_extensions import Literal, TypedDict +else: + from typing import Literal, TypedDict # pragma: no 
cover + + +class ParserError(ValueError): + pass + + +# Allows for ParserErrors to be propagated from _build_datetime() +# when day_of_year errors occur. +# Before this, the ParserErrors were caught by the try/except in +# _parse_multiformat() and the appropriate error message was not +# transmitted to the user. +class ParserMatchError(ParserError): + pass + + +_WEEKDATE_ELEMENT = Union[str, bytes, SupportsInt, bytearray] + +_FORMAT_TYPE = Literal[ + "YYYY", + "YY", + "MM", + "M", + "DDDD", + "DDD", + "DD", + "D", + "HH", + "H", + "hh", + "h", + "mm", + "m", + "ss", + "s", + "X", + "x", + "ZZZ", + "ZZ", + "Z", + "S", + "W", + "MMMM", + "MMM", + "Do", + "dddd", + "ddd", + "d", + "a", + "A", +] + + +class _Parts(TypedDict, total=False): + year: int + month: int + day_of_year: int + day: int + hour: int + minute: int + second: int + microsecond: int + timestamp: float + expanded_timestamp: int + tzinfo: dt_tzinfo + am_pm: Literal["am", "pm"] + day_of_week: int + weekdate: Tuple[_WEEKDATE_ELEMENT, _WEEKDATE_ELEMENT, Optional[_WEEKDATE_ELEMENT]] + + +class DateTimeParser: + _FORMAT_RE: ClassVar[Pattern[str]] = re.compile( + r"(YYY?Y?|MM?M?M?|Do|DD?D?D?|d?d?d?d|HH?|hh?|mm?|ss?|S+|ZZ?Z?|a|A|x|X|W)" + ) + _ESCAPE_RE: ClassVar[Pattern[str]] = re.compile(r"\[[^\[\]]*\]") + + _ONE_OR_TWO_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d{1,2}") + _ONE_OR_TWO_OR_THREE_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d{1,3}") + _ONE_OR_MORE_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d+") + _TWO_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d{2}") + _THREE_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d{3}") + _FOUR_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d{4}") + _TZ_Z_RE: ClassVar[Pattern[str]] = re.compile(r"([\+\-])(\d{2})(?:(\d{2}))?|Z") + _TZ_ZZ_RE: ClassVar[Pattern[str]] = re.compile(r"([\+\-])(\d{2})(?:\:(\d{2}))?|Z") + _TZ_NAME_RE: ClassVar[Pattern[str]] = re.compile(r"\w[\w+\-/]+") + # NOTE: timestamps cannot be parsed from natural language strings 
(by removing the ^...$) because it will + # break cases like "15 Jul 2000" and a format list (see issue #447) + _TIMESTAMP_RE: ClassVar[Pattern[str]] = re.compile(r"^\-?\d+\.?\d+$") + _TIMESTAMP_EXPANDED_RE: ClassVar[Pattern[str]] = re.compile(r"^\-?\d+$") + _TIME_RE: ClassVar[Pattern[str]] = re.compile( + r"^(\d{2})(?:\:?(\d{2}))?(?:\:?(\d{2}))?(?:([\.\,])(\d+))?$" + ) + _WEEK_DATE_RE: ClassVar[Pattern[str]] = re.compile( + r"(?P\d{4})[\-]?W(?P\d{2})[\-]?(?P\d)?" + ) + + _BASE_INPUT_RE_MAP: ClassVar[Dict[_FORMAT_TYPE, Pattern[str]]] = { + "YYYY": _FOUR_DIGIT_RE, + "YY": _TWO_DIGIT_RE, + "MM": _TWO_DIGIT_RE, + "M": _ONE_OR_TWO_DIGIT_RE, + "DDDD": _THREE_DIGIT_RE, + "DDD": _ONE_OR_TWO_OR_THREE_DIGIT_RE, + "DD": _TWO_DIGIT_RE, + "D": _ONE_OR_TWO_DIGIT_RE, + "HH": _TWO_DIGIT_RE, + "H": _ONE_OR_TWO_DIGIT_RE, + "hh": _TWO_DIGIT_RE, + "h": _ONE_OR_TWO_DIGIT_RE, + "mm": _TWO_DIGIT_RE, + "m": _ONE_OR_TWO_DIGIT_RE, + "ss": _TWO_DIGIT_RE, + "s": _ONE_OR_TWO_DIGIT_RE, + "X": _TIMESTAMP_RE, + "x": _TIMESTAMP_EXPANDED_RE, + "ZZZ": _TZ_NAME_RE, + "ZZ": _TZ_ZZ_RE, + "Z": _TZ_Z_RE, + "S": _ONE_OR_MORE_DIGIT_RE, + "W": _WEEK_DATE_RE, + } + + SEPARATORS: ClassVar[List[str]] = ["-", "/", "."] + + locale: locales.Locale + _input_re_map: Dict[_FORMAT_TYPE, Pattern[str]] + + def __init__(self, locale: str = DEFAULT_LOCALE, cache_size: int = 0) -> None: + + self.locale = locales.get_locale(locale) + self._input_re_map = self._BASE_INPUT_RE_MAP.copy() + self._input_re_map.update( + { + "MMMM": self._generate_choice_re( + self.locale.month_names[1:], re.IGNORECASE + ), + "MMM": self._generate_choice_re( + self.locale.month_abbreviations[1:], re.IGNORECASE + ), + "Do": re.compile(self.locale.ordinal_day_re), + "dddd": self._generate_choice_re( + self.locale.day_names[1:], re.IGNORECASE + ), + "ddd": self._generate_choice_re( + self.locale.day_abbreviations[1:], re.IGNORECASE + ), + "d": re.compile(r"[1-7]"), + "a": self._generate_choice_re( + (self.locale.meridians["am"], 
self.locale.meridians["pm"]) + ), + # note: 'A' token accepts both 'am/pm' and 'AM/PM' formats to + # ensure backwards compatibility of this token + "A": self._generate_choice_re(self.locale.meridians.values()), + } + ) + if cache_size > 0: + self._generate_pattern_re = lru_cache(maxsize=cache_size)( # type: ignore + self._generate_pattern_re + ) + + # TODO: since we support more than ISO 8601, we should rename this function + # IDEA: break into multiple functions + def parse_iso( + self, datetime_string: str, normalize_whitespace: bool = False + ) -> datetime: + + if normalize_whitespace: + datetime_string = re.sub(r"\s+", " ", datetime_string.strip()) + + has_space_divider = " " in datetime_string + has_t_divider = "T" in datetime_string + + num_spaces = datetime_string.count(" ") + if has_space_divider and num_spaces != 1 or has_t_divider and num_spaces > 0: + raise ParserError( + f"Expected an ISO 8601-like string, but was given {datetime_string!r}. " + "Try passing in a format string to resolve this." + ) + + has_time = has_space_divider or has_t_divider + has_tz = False + + # date formats (ISO 8601 and others) to test against + # NOTE: YYYYMM is omitted to avoid confusion with YYMMDD (no longer part of ISO 8601, but is still often used) + formats = [ + "YYYY-MM-DD", + "YYYY-M-DD", + "YYYY-M-D", + "YYYY/MM/DD", + "YYYY/M/DD", + "YYYY/M/D", + "YYYY.MM.DD", + "YYYY.M.DD", + "YYYY.M.D", + "YYYYMMDD", + "YYYY-DDDD", + "YYYYDDDD", + "YYYY-MM", + "YYYY/MM", + "YYYY.MM", + "YYYY", + "W", + ] + + if has_time: + + if has_space_divider: + date_string, time_string = datetime_string.split(" ", 1) + else: + date_string, time_string = datetime_string.split("T", 1) + + time_parts = re.split(r"[\+\-Z]", time_string, 1, re.IGNORECASE) + + time_components: Optional[Match[str]] = self._TIME_RE.match(time_parts[0]) + + if time_components is None: + raise ParserError( + "Invalid time component provided. 
" + "Please specify a format or provide a valid time component in the basic or extended ISO 8601 time format." + ) + + ( + hours, + minutes, + seconds, + subseconds_sep, + subseconds, + ) = time_components.groups() + + has_tz = len(time_parts) == 2 + has_minutes = minutes is not None + has_seconds = seconds is not None + has_subseconds = subseconds is not None + + is_basic_time_format = ":" not in time_parts[0] + tz_format = "Z" + + # use 'ZZ' token instead since tz offset is present in non-basic format + if has_tz and ":" in time_parts[1]: + tz_format = "ZZ" + + time_sep = "" if is_basic_time_format else ":" + + if has_subseconds: + time_string = "HH{time_sep}mm{time_sep}ss{subseconds_sep}S".format( + time_sep=time_sep, subseconds_sep=subseconds_sep + ) + elif has_seconds: + time_string = "HH{time_sep}mm{time_sep}ss".format(time_sep=time_sep) + elif has_minutes: + time_string = f"HH{time_sep}mm" + else: + time_string = "HH" + + if has_space_divider: + formats = [f"{f} {time_string}" for f in formats] + else: + formats = [f"{f}T{time_string}" for f in formats] + + if has_time and has_tz: + # Add "Z" or "ZZ" to the format strings to indicate to + # _parse_token() that a timezone needs to be parsed + formats = [f"{f}{tz_format}" for f in formats] + + return self._parse_multiformat(datetime_string, formats) + + def parse( + self, + datetime_string: str, + fmt: Union[List[str], str], + normalize_whitespace: bool = False, + ) -> datetime: + + if normalize_whitespace: + datetime_string = re.sub(r"\s+", " ", datetime_string) + + if isinstance(fmt, list): + return self._parse_multiformat(datetime_string, fmt) + + try: + fmt_tokens: List[_FORMAT_TYPE] + fmt_pattern_re: Pattern[str] + fmt_tokens, fmt_pattern_re = self._generate_pattern_re(fmt) + except re.error as e: + raise ParserMatchError( + f"Failed to generate regular expression pattern: {e}." 
+ ) + + match = fmt_pattern_re.search(datetime_string) + + if match is None: + raise ParserMatchError( + f"Failed to match {fmt!r} when parsing {datetime_string!r}." + ) + + parts: _Parts = {} + for token in fmt_tokens: + value: Union[Tuple[str, str, str], str] + if token == "Do": + value = match.group("value") + elif token == "W": + value = (match.group("year"), match.group("week"), match.group("day")) + else: + value = match.group(token) + + if value is None: + raise ParserMatchError( + f"Unable to find a match group for the specified token {token!r}." + ) + + self._parse_token(token, value, parts) # type: ignore + + return self._build_datetime(parts) + + def _generate_pattern_re(self, fmt: str) -> Tuple[List[_FORMAT_TYPE], Pattern[str]]: + + # fmt is a string of tokens like 'YYYY-MM-DD' + # we construct a new string by replacing each + # token by its pattern: + # 'YYYY-MM-DD' -> '(?P\d{4})-(?P\d{2})-(?P
\d{2})' + tokens: List[_FORMAT_TYPE] = [] + offset = 0 + + # Escape all special RegEx chars + escaped_fmt = re.escape(fmt) + + # Extract the bracketed expressions to be reinserted later. + escaped_fmt = re.sub(self._ESCAPE_RE, "#", escaped_fmt) + + # Any number of S is the same as one. + # TODO: allow users to specify the number of digits to parse + escaped_fmt = re.sub(r"S+", "S", escaped_fmt) + + escaped_data = re.findall(self._ESCAPE_RE, fmt) + + fmt_pattern = escaped_fmt + + for m in self._FORMAT_RE.finditer(escaped_fmt): + token: _FORMAT_TYPE = cast(_FORMAT_TYPE, m.group(0)) + try: + input_re = self._input_re_map[token] + except KeyError: + raise ParserError(f"Unrecognized token {token!r}.") + input_pattern = f"(?P<{token}>{input_re.pattern})" + tokens.append(token) + # a pattern doesn't have the same length as the token + # it replaces! We keep the difference in the offset variable. + # This works because the string is scanned left-to-right and matches + # are returned in the order found by finditer. + fmt_pattern = ( + fmt_pattern[: m.start() + offset] + + input_pattern + + fmt_pattern[m.end() + offset :] + ) + offset += len(input_pattern) - (m.end() - m.start()) + + final_fmt_pattern = "" + split_fmt = fmt_pattern.split(r"\#") + + # Due to the way Python splits, 'split_fmt' will always be longer + for i in range(len(split_fmt)): + final_fmt_pattern += split_fmt[i] + if i < len(escaped_data): + final_fmt_pattern += escaped_data[i][1:-1] + + # Wrap final_fmt_pattern in a custom word boundary to strictly + # match the formatting pattern and filter out date and time formats + # that include junk such as: blah1998-09-12 blah, blah 1998-09-12blah, + # blah1998-09-12blah. The custom word boundary matches every character + # that is not a whitespace character to allow for searching for a date + # and time string in a natural language sentence. Therefore, searching + # for a string of the form YYYY-MM-DD in "blah 1998-09-12 blah" will + # work properly. 
+ # Certain punctuation before or after the target pattern such as + # "1998-09-12," is permitted. For the full list of valid punctuation, + # see the documentation. + + starting_word_boundary = ( + r"(?\s])" # This is the list of punctuation that is ok before the + # pattern (i.e. "It can't not be these characters before the pattern") + r"(\b|^)" + # The \b is to block cases like 1201912 but allow 201912 for pattern YYYYMM. The ^ was necessary to allow a + # negative number through i.e. before epoch numbers + ) + ending_word_boundary = ( + r"(?=[\,\.\;\:\?\!\"\'\`\[\]\{\}\(\)\<\>]?" # Positive lookahead stating that these punctuation marks + # can appear after the pattern at most 1 time + r"(?!\S))" # Don't allow any non-whitespace character after the punctuation + ) + bounded_fmt_pattern = r"{}{}{}".format( + starting_word_boundary, final_fmt_pattern, ending_word_boundary + ) + + return tokens, re.compile(bounded_fmt_pattern, flags=re.IGNORECASE) + + @overload + def _parse_token( + self, + token: Literal[ + "YYYY", + "YY", + "MM", + "M", + "DDDD", + "DDD", + "DD", + "D", + "Do", + "HH", + "hh", + "h", + "H", + "mm", + "m", + "ss", + "s", + "x", + ], + value: Union[str, bytes, SupportsInt, bytearray], + parts: _Parts, + ) -> None: + ... # pragma: no cover + + @overload + def _parse_token( + self, + token: Literal["X"], + value: Union[str, bytes, SupportsFloat, bytearray], + parts: _Parts, + ) -> None: + ... # pragma: no cover + + @overload + def _parse_token( + self, + token: Literal["MMMM", "MMM", "dddd", "ddd", "S"], + value: Union[str, bytes, bytearray], + parts: _Parts, + ) -> None: + ... # pragma: no cover + + @overload + def _parse_token( + self, + token: Literal["a", "A", "ZZZ", "ZZ", "Z"], + value: Union[str, bytes], + parts: _Parts, + ) -> None: + ... # pragma: no cover + + @overload + def _parse_token( + self, + token: Literal["W"], + value: Tuple[_WEEKDATE_ELEMENT, _WEEKDATE_ELEMENT, Optional[_WEEKDATE_ELEMENT]], + parts: _Parts, + ) -> None: + ... 
# pragma: no cover + + def _parse_token( + self, + token: Any, + value: Any, + parts: _Parts, + ) -> None: + + if token == "YYYY": + parts["year"] = int(value) + + elif token == "YY": + value = int(value) + parts["year"] = 1900 + value if value > 68 else 2000 + value + + elif token in ["MMMM", "MMM"]: + # FIXME: month_number() is nullable + parts["month"] = self.locale.month_number(value.lower()) # type: ignore + + elif token in ["MM", "M"]: + parts["month"] = int(value) + + elif token in ["DDDD", "DDD"]: + parts["day_of_year"] = int(value) + + elif token in ["DD", "D"]: + parts["day"] = int(value) + + elif token == "Do": + parts["day"] = int(value) + + elif token == "dddd": + # locale day names are 1-indexed + day_of_week = [x.lower() for x in self.locale.day_names].index( + value.lower() + ) + parts["day_of_week"] = day_of_week - 1 + + elif token == "ddd": + # locale day abbreviations are 1-indexed + day_of_week = [x.lower() for x in self.locale.day_abbreviations].index( + value.lower() + ) + parts["day_of_week"] = day_of_week - 1 + + elif token.upper() in ["HH", "H"]: + parts["hour"] = int(value) + + elif token in ["mm", "m"]: + parts["minute"] = int(value) + + elif token in ["ss", "s"]: + parts["second"] = int(value) + + elif token == "S": + # We have the *most significant* digits of an arbitrary-precision integer. + # We want the six most significant digits as an integer, rounded. + # IDEA: add nanosecond support somehow? Need datetime support for it first. 
+ value = value.ljust(7, "0") + + # floating-point (IEEE-754) defaults to half-to-even rounding + seventh_digit = int(value[6]) + if seventh_digit == 5: + rounding = int(value[5]) % 2 + elif seventh_digit > 5: + rounding = 1 + else: + rounding = 0 + + parts["microsecond"] = int(value[:6]) + rounding + + elif token == "X": + parts["timestamp"] = float(value) + + elif token == "x": + parts["expanded_timestamp"] = int(value) + + elif token in ["ZZZ", "ZZ", "Z"]: + parts["tzinfo"] = TzinfoParser.parse(value) + + elif token in ["a", "A"]: + if value in (self.locale.meridians["am"], self.locale.meridians["AM"]): + parts["am_pm"] = "am" + if "hour" in parts and not 0 <= parts["hour"] <= 12: + raise ParserMatchError( + f"Hour token value must be between 0 and 12 inclusive for token {token!r}." + ) + elif value in (self.locale.meridians["pm"], self.locale.meridians["PM"]): + parts["am_pm"] = "pm" + elif token == "W": + parts["weekdate"] = value + + @staticmethod + def _build_datetime(parts: _Parts) -> datetime: + weekdate = parts.get("weekdate") + + if weekdate is not None: + + year, week = int(weekdate[0]), int(weekdate[1]) + + if weekdate[2] is not None: + _day = int(weekdate[2]) + else: + # day not given, default to 1 + _day = 1 + + date_string = f"{year}-{week}-{_day}" + + # tokens for ISO 8601 weekdates + dt = datetime.strptime(date_string, "%G-%V-%u") + + parts["year"] = dt.year + parts["month"] = dt.month + parts["day"] = dt.day + + timestamp = parts.get("timestamp") + + if timestamp is not None: + return datetime.fromtimestamp(timestamp, tz=tz.tzutc()) + + expanded_timestamp = parts.get("expanded_timestamp") + + if expanded_timestamp is not None: + return datetime.fromtimestamp( + normalize_timestamp(expanded_timestamp), + tz=tz.tzutc(), + ) + + day_of_year = parts.get("day_of_year") + + if day_of_year is not None: + _year = parts.get("year") + month = parts.get("month") + if _year is None: + raise ParserError( + "Year component is required with the DDD and DDDD 
tokens." + ) + + if month is not None: + raise ParserError( + "Month component is not allowed with the DDD and DDDD tokens." + ) + + date_string = f"{_year}-{day_of_year}" + try: + dt = datetime.strptime(date_string, "%Y-%j") + except ValueError: + raise ParserError( + f"The provided day of year {day_of_year!r} is invalid." + ) + + parts["year"] = dt.year + parts["month"] = dt.month + parts["day"] = dt.day + + day_of_week: Optional[int] = parts.get("day_of_week") + day = parts.get("day") + + # If day is passed, ignore day of week + if day_of_week is not None and day is None: + year = parts.get("year", 1970) + month = parts.get("month", 1) + day = 1 + + # dddd => first day of week after epoch + # dddd YYYY => first day of week in specified year + # dddd MM YYYY => first day of week in specified year and month + # dddd MM => first day after epoch in specified month + next_weekday_dt = next_weekday(datetime(year, month, day), day_of_week) + parts["year"] = next_weekday_dt.year + parts["month"] = next_weekday_dt.month + parts["day"] = next_weekday_dt.day + + am_pm = parts.get("am_pm") + hour = parts.get("hour", 0) + + if am_pm == "pm" and hour < 12: + hour += 12 + elif am_pm == "am" and hour == 12: + hour = 0 + + # Support for midnight at the end of day + if hour == 24: + if parts.get("minute", 0) != 0: + raise ParserError("Midnight at the end of day must not contain minutes") + if parts.get("second", 0) != 0: + raise ParserError("Midnight at the end of day must not contain seconds") + if parts.get("microsecond", 0) != 0: + raise ParserError( + "Midnight at the end of day must not contain microseconds" + ) + hour = 0 + day_increment = 1 + else: + day_increment = 0 + + # account for rounding up to 1000000 + microsecond = parts.get("microsecond", 0) + if microsecond == 1000000: + microsecond = 0 + second_increment = 1 + else: + second_increment = 0 + + increment = timedelta(days=day_increment, seconds=second_increment) + + return ( + datetime( + year=parts.get("year", 
1), + month=parts.get("month", 1), + day=parts.get("day", 1), + hour=hour, + minute=parts.get("minute", 0), + second=parts.get("second", 0), + microsecond=microsecond, + tzinfo=parts.get("tzinfo"), + ) + + increment + ) + + def _parse_multiformat(self, string: str, formats: Iterable[str]) -> datetime: + + _datetime: Optional[datetime] = None + + for fmt in formats: + try: + _datetime = self.parse(string, fmt) + break + except ParserMatchError: + pass + + if _datetime is None: + supported_formats = ", ".join(formats) + raise ParserError( + f"Could not match input {string!r} to any of the following formats: {supported_formats}." + ) + + return _datetime + + # generates a capture group of choices separated by an OR operator + @staticmethod + def _generate_choice_re( + choices: Iterable[str], flags: Union[int, re.RegexFlag] = 0 + ) -> Pattern[str]: + return re.compile(r"({})".format("|".join(choices)), flags=flags) + + +class TzinfoParser: + _TZINFO_RE: ClassVar[Pattern[str]] = re.compile( + r"^([\+\-])?(\d{2})(?:\:?(\d{2}))?$" + ) + + @classmethod + def parse(cls, tzinfo_string: str) -> dt_tzinfo: + + tzinfo: Optional[dt_tzinfo] = None + + if tzinfo_string == "local": + tzinfo = tz.tzlocal() + + elif tzinfo_string in ["utc", "UTC", "Z"]: + tzinfo = tz.tzutc() + + else: + + iso_match = cls._TZINFO_RE.match(tzinfo_string) + + if iso_match: + sign: Optional[str] + hours: str + minutes: Union[str, int, None] + sign, hours, minutes = iso_match.groups() + seconds = int(hours) * 3600 + int(minutes or 0) * 60 + + if sign == "-": + seconds *= -1 + + tzinfo = tz.tzoffset(None, seconds) + + else: + tzinfo = tz.gettz(tzinfo_string) + + if tzinfo is None: + raise ParserError(f"Could not parse timezone expression {tzinfo_string!r}.") + + return tzinfo diff --git a/third_party/python/arrow/arrow/py.typed b/third_party/python/arrow/arrow/py.typed new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/third_party/python/arrow/arrow/util.py 
b/third_party/python/arrow/arrow/util.py new file mode 100644 index 0000000000000..f3eaa21c9b46c --- /dev/null +++ b/third_party/python/arrow/arrow/util.py @@ -0,0 +1,117 @@ +"""Helpful functions used internally within arrow.""" + +import datetime +from typing import Any, Optional, cast + +from dateutil.rrule import WEEKLY, rrule + +from arrow.constants import ( + MAX_ORDINAL, + MAX_TIMESTAMP, + MAX_TIMESTAMP_MS, + MAX_TIMESTAMP_US, + MIN_ORDINAL, +) + + +def next_weekday( + start_date: Optional[datetime.date], weekday: int +) -> datetime.datetime: + """Get next weekday from the specified start date. + + :param start_date: Datetime object representing the start date. + :param weekday: Next weekday to obtain. Can be a value between 0 (Monday) and 6 (Sunday). + :return: Datetime object corresponding to the next weekday after start_date. + + Usage:: + + # Get first Monday after epoch + >>> next_weekday(datetime(1970, 1, 1), 0) + 1970-01-05 00:00:00 + + # Get first Thursday after epoch + >>> next_weekday(datetime(1970, 1, 1), 3) + 1970-01-01 00:00:00 + + # Get first Sunday after epoch + >>> next_weekday(datetime(1970, 1, 1), 6) + 1970-01-04 00:00:00 + """ + if weekday < 0 or weekday > 6: + raise ValueError("Weekday must be between 0 (Monday) and 6 (Sunday).") + return cast( + datetime.datetime, + rrule(freq=WEEKLY, dtstart=start_date, byweekday=weekday, count=1)[0], + ) + + +def is_timestamp(value: Any) -> bool: + """Check if value is a valid timestamp.""" + if isinstance(value, bool): + return False + if not isinstance(value, (int, float, str)): + return False + try: + float(value) + return True + except ValueError: + return False + + +def validate_ordinal(value: Any) -> None: + """Raise an exception if value is an invalid Gregorian ordinal. 
+ + :param value: the input to be checked + + """ + if isinstance(value, bool) or not isinstance(value, int): + raise TypeError(f"Ordinal must be an integer (got type {type(value)}).") + if not (MIN_ORDINAL <= value <= MAX_ORDINAL): + raise ValueError(f"Ordinal {value} is out of range.") + + +def normalize_timestamp(timestamp: float) -> float: + """Normalize millisecond and microsecond timestamps into normal timestamps.""" + if timestamp > MAX_TIMESTAMP: + if timestamp < MAX_TIMESTAMP_MS: + timestamp /= 1000 + elif timestamp < MAX_TIMESTAMP_US: + timestamp /= 1_000_000 + else: + raise ValueError(f"The specified timestamp {timestamp!r} is too large.") + return timestamp + + +# Credit to https://stackoverflow.com/a/1700069 +def iso_to_gregorian(iso_year: int, iso_week: int, iso_day: int) -> datetime.date: + """Converts an ISO week date into a datetime object. + + :param iso_year: the year + :param iso_week: the week number, each year has either 52 or 53 weeks + :param iso_day: the day numbered 1 through 7, beginning with Monday + + """ + + if not 1 <= iso_week <= 53: + raise ValueError("ISO Calendar week value must be between 1-53.") + + if not 1 <= iso_day <= 7: + raise ValueError("ISO Calendar day value must be between 1-7") + + # The first week of the year always contains 4 Jan. + fourth_jan = datetime.date(iso_year, 1, 4) + delta = datetime.timedelta(fourth_jan.isoweekday() - 1) + year_start = fourth_jan - delta + gregorian = year_start + datetime.timedelta(days=iso_day - 1, weeks=iso_week - 1) + + return gregorian + + +def validate_bounds(bounds: str) -> None: + if bounds != "()" and bounds != "(]" and bounds != "[)" and bounds != "[]": + raise ValueError( + "Invalid bounds. Please select between '()', '(]', '[)', or '[]'." 
+ ) + + +__all__ = ["next_weekday", "is_timestamp", "validate_ordinal", "iso_to_gregorian"] diff --git a/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/DESCRIPTION.rst b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000000000..4ef0073431d86 --- /dev/null +++ b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/DESCRIPTION.rst @@ -0,0 +1,152 @@ +============================= +BinaryOrNot +============================= + +.. image:: https://img.shields.io/pypi/v/binaryornot.svg?style=flat + :target: https://pypi.python.org/pypi/binaryornot + +.. image:: https://readthedocs.org/projects/binaryornot/badge/?version=latest + :target: http://binaryornot.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + + +.. image:: https://pyup.io/repos/github/audreyr/binaryornot/shield.svg + :target: https://pyup.io/repos/github/audreyr/binaryornot/ + :alt: Updates + +Ultra-lightweight pure Python package to guess whether a file is binary or text, +using a heuristic similar to Perl's `pp_fttext` and its analysis by @eliben. + +* Free software: BSD license +* Documentation: https://binaryornot.readthedocs.io + +Status +------ + +It works, and people are using this package in various places. But it doesn't cover all edge cases yet. + +The code could be improved. Pull requests welcome! 
As of now, it is based on these snippets, but that may change: + +* http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python +* http://stackoverflow.com/questions/1446549/how-to-identify-binary-and-text-files-using-python +* http://code.activestate.com/recipes/173220/ +* http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/ + +Features +-------- + +Has tests for these file types: + +* Text: .txt, .css, .json, .svg, .js, .lua, .pl, .rst +* Binary: .png, .gif, .jpg, .tiff, .bmp, .DS_Store, .eot, .otf, .ttf, .woff, .rgb + +Has tests for numerous encodings. + +Why? +---- + +You may be thinking, "I can write this in 2 lines of code?!" + +It's actually not that easy. Here's a great article about how Perl's +heuristic to guess file types works: http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/ + +And that's just where we started. Over time, we've found more edge cases and +our heuristic has gotten more complex. + +Also, this package saves you from having to write and thoroughly test +your code with all sorts of weird file types and encodings, cross-platform. + +Builds +------ + +Linux (Ubuntu 12.04 LTS Server Edition 64 bit): + +.. image:: https://img.shields.io/travis/audreyr/binaryornot/master.svg + :target: https://travis-ci.org/audreyr/binaryornot + +Windows (Windows Server 2012 R2 (x64)): + +.. image:: https://img.shields.io/appveyor/ci/audreyr/binaryornot/master.svg + :target: https://ci.appveyor.com/project/audreyr/binaryornot + +Credits +------- + +* Special thanks to Eli Bendersky (@eliben) for his writeup explaining the heuristic and his implementation, which this is largely based on. 
+* Source code from the portion of Perl's `pp_fttext` that checks for textiness: https://github.com/Perl/perl5/blob/v5.23.1/pp_sys.c#L3527-L3587 + + + + +History +------- + +0.4.4 (2017-04-13) +~~~~~~~~~~~~~~~~~~ + +* Notify users for file i/o issues. Thanks @lukehinds! + + +0.4.3 (2017-04-13) +~~~~~~~~~~~~~~~~~~ + +* Restricted chardet to anything 3.0.2 or higher due to https://github.com/chardet/chardet/issues/113. Thanks @dan-blanchard for the quick fix! + +0.4.2 (2017-04-12) +~~~~~~~~~~~~~~~~~~ + +* Restricted chardet to anything under 3.0 due to https://github.com/chardet/chardet/issues/113 +* Added pyup badge +* Added utilities for pushing new versions up + +0.4.0 (2015-08-21) +~~~~~~~~~~~~~~~~~~ + +* Enhanced detection for some binary streams and UTF texts. (#10, 11) Thanks `@pombredanne`_. +* Set up Appveyor for continuous testing on Windows. Thanks `@pydanny`_. +* Update link to Perl source implementation. (#9) Thanks `@asmeurer`_ `@pombredanne`_ `@audreyr`_. +* Handle UnicodeDecodeError in check. (#12) Thanks `@DRMacIver`_. +* Add very simple Hypothesis based tests. (#13) Thanks `@DRMacIver`_. +* Use setup to determine requirements and remove redundant requirements.txt. (#14) Thanks `@hackebrot`_. +* Add documentation status badge to README.rst. (#15) Thanks `@hackebrot`_. +* Run tox in travis.yml. Add pypy and Python 3.4 to tox environments. (#16) Thanks `@hackebrot`_ `@pydanny`_. +* Handle LookupError when detecting encoding. (#17) Thanks `@DRMacIver`_. + + +.. _`@pombredanne`: https://github.com/pombredanne +.. _`@pydanny`: https://github.com/pydanny +.. _`@asmeurer`: https://github.com/asmeurer +.. _`@audreyr`: https://github.com/audreyr +.. _`@DRMacIver`: https://github.com/DRMacIver +.. _`@hackebrot`: https://github.com/hackebrot + +0.3.0 (2014-05-05) +~~~~~~~~~~~~~~~~~~ + +* Include tests, docs in source package. (#6) Thanks `@vincentbernat`_. +* Drop unnecessary shebangs and executable bits. (#8) Thanks `@scop`_. 
+* Generate string of printable extended ASCII bytes only once. (#7) Thanks `@scop`_. +* Make number of bytes to read parametrizable. (#7) Thanks `@scop`_. + +.. _`@vincentbernat`: https://github.com/vincentbernat +.. _`@scop`: https://github.com/scop + +0.2.0 (2013-09-22) +~~~~~~~~~~~~~~~~~~ + +* Complete rewrite of everything. Thanks `@ncoghlan`_. + +.. _`@ncoghlan`: https://github.com/ncoghlan + +0.1.1 (2013-08-17) +~~~~~~~~~~~~~~~~~~ + +* Tests pass under Python 2.6, 2.7, 3.3, PyPy. + + +0.1.0 (2013-08-17) +~~~~~~~~~~~~~~~~~~ + +* First release on PyPI. + + diff --git a/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/METADATA b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/METADATA new file mode 100644 index 0000000000000..cabfe745870fa --- /dev/null +++ b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/METADATA @@ -0,0 +1,175 @@ +Metadata-Version: 2.0 +Name: binaryornot +Version: 0.4.4 +Summary: Ultra-lightweight pure Python package to check if a file is binary or text. +Home-page: https://github.com/audreyr/binaryornot +Author: Audrey Roy Greenfeld +Author-email: aroy@alum.mit.edu +License: BSD +Keywords: binaryornot +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Requires-Dist: chardet (>=3.0.2) + +============================= +BinaryOrNot +============================= + +.. image:: https://img.shields.io/pypi/v/binaryornot.svg?style=flat + :target: https://pypi.python.org/pypi/binaryornot + +.. 
image:: https://readthedocs.org/projects/binaryornot/badge/?version=latest + :target: http://binaryornot.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + + +.. image:: https://pyup.io/repos/github/audreyr/binaryornot/shield.svg + :target: https://pyup.io/repos/github/audreyr/binaryornot/ + :alt: Updates + +Ultra-lightweight pure Python package to guess whether a file is binary or text, +using a heuristic similar to Perl's `pp_fttext` and its analysis by @eliben. + +* Free software: BSD license +* Documentation: https://binaryornot.readthedocs.io + +Status +------ + +It works, and people are using this package in various places. But it doesn't cover all edge cases yet. + +The code could be improved. Pull requests welcome! As of now, it is based on these snippets, but that may change: + +* http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python +* http://stackoverflow.com/questions/1446549/how-to-identify-binary-and-text-files-using-python +* http://code.activestate.com/recipes/173220/ +* http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/ + +Features +-------- + +Has tests for these file types: + +* Text: .txt, .css, .json, .svg, .js, .lua, .pl, .rst +* Binary: .png, .gif, .jpg, .tiff, .bmp, .DS_Store, .eot, .otf, .ttf, .woff, .rgb + +Has tests for numerous encodings. + +Why? +---- + +You may be thinking, "I can write this in 2 lines of code?!" + +It's actually not that easy. Here's a great article about how Perl's +heuristic to guess file types works: http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/ + +And that's just where we started. Over time, we've found more edge cases and +our heuristic has gotten more complex. + +Also, this package saves you from having to write and thoroughly test +your code with all sorts of weird file types and encodings, cross-platform. 
+ +Builds +------ + +Linux (Ubuntu 12.04 LTS Server Edition 64 bit): + +.. image:: https://img.shields.io/travis/audreyr/binaryornot/master.svg + :target: https://travis-ci.org/audreyr/binaryornot + +Windows (Windows Server 2012 R2 (x64)): + +.. image:: https://img.shields.io/appveyor/ci/audreyr/binaryornot/master.svg + :target: https://ci.appveyor.com/project/audreyr/binaryornot + +Credits +------- + +* Special thanks to Eli Bendersky (@eliben) for his writeup explaining the heuristic and his implementation, which this is largely based on. +* Source code from the portion of Perl's `pp_fttext` that checks for textiness: https://github.com/Perl/perl5/blob/v5.23.1/pp_sys.c#L3527-L3587 + + + + +History +------- + +0.4.4 (2017-04-13) +~~~~~~~~~~~~~~~~~~ + +* Notify users for file i/o issues. Thanks @lukehinds! + + +0.4.3 (2017-04-13) +~~~~~~~~~~~~~~~~~~ + +* Restricted chardet to anything 3.0.2 or higher due to https://github.com/chardet/chardet/issues/113. Thanks @dan-blanchard for the quick fix! + +0.4.2 (2017-04-12) +~~~~~~~~~~~~~~~~~~ + +* Restricted chardet to anything under 3.0 due to https://github.com/chardet/chardet/issues/113 +* Added pyup badge +* Added utilities for pushing new versions up + +0.4.0 (2015-08-21) +~~~~~~~~~~~~~~~~~~ + +* Enhanced detection for some binary streams and UTF texts. (#10, 11) Thanks `@pombredanne`_. +* Set up Appveyor for continuous testing on Windows. Thanks `@pydanny`_. +* Update link to Perl source implementation. (#9) Thanks `@asmeurer`_ `@pombredanne`_ `@audreyr`_. +* Handle UnicodeDecodeError in check. (#12) Thanks `@DRMacIver`_. +* Add very simple Hypothesis based tests. (#13) Thanks `@DRMacIver`_. +* Use setup to determine requirements and remove redundant requirements.txt. (#14) Thanks `@hackebrot`_. +* Add documentation status badge to README.rst. (#15) Thanks `@hackebrot`_. +* Run tox in travis.yml. Add pypy and Python 3.4 to tox environments. (#16) Thanks `@hackebrot`_ `@pydanny`_. 
+* Handle LookupError when detecting encoding. (#17) Thanks `@DRMacIver`_. + + +.. _`@pombredanne`: https://github.com/pombredanne +.. _`@pydanny`: https://github.com/pydanny +.. _`@asmeurer`: https://github.com/asmeurer +.. _`@audreyr`: https://github.com/audreyr +.. _`@DRMacIver`: https://github.com/DRMacIver +.. _`@hackebrot`: https://github.com/hackebrot + +0.3.0 (2014-05-05) +~~~~~~~~~~~~~~~~~~ + +* Include tests, docs in source package. (#6) Thanks `@vincentbernat`_. +* Drop unnecessary shebangs and executable bits. (#8) Thanks `@scop`_. +* Generate string of printable extended ASCII bytes only once. (#7) Thanks `@scop`_. +* Make number of bytes to read parametrizable. (#7) Thanks `@scop`_. + +.. _`@vincentbernat`: https://github.com/vincentbernat +.. _`@scop`: https://github.com/scop + +0.2.0 (2013-09-22) +~~~~~~~~~~~~~~~~~~ + +* Complete rewrite of everything. Thanks `@ncoghlan`_. + +.. _`@ncoghlan`: https://github.com/ncoghlan + +0.1.1 (2013-08-17) +~~~~~~~~~~~~~~~~~~ + +* Tests pass under Python 2.6, 2.7, 3.3, PyPy. + + +0.1.0 (2013-08-17) +~~~~~~~~~~~~~~~~~~ + +* First release on PyPI. 
+ + diff --git a/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/RECORD b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/RECORD new file mode 100644 index 0000000000000..ce88709fdf597 --- /dev/null +++ b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/RECORD @@ -0,0 +1,9 @@ +binaryornot/__init__.py,sha256=XfSXEYNIhIlBmsiUBzn8nDNSUA-2P5iseHo6sD1ZlyE,80 +binaryornot/check.py,sha256=Doh9gd_DUYWdGpJ8CCMmu4A-bul3_kV3qnl4OsgDp10,756 +binaryornot/helpers.py,sha256=p6aDimVStPQKZeQNHKkQNneeD8jfT4qpm9hCiV_8jYU,4737 +binaryornot-0.4.4.dist-info/DESCRIPTION.rst,sha256=QGOp8ciWl3QJXmtUSCWTYNLFy4exWZ4IIieSuJ1YWmQ,5120 +binaryornot-0.4.4.dist-info/METADATA,sha256=ZbKKAHfl5XQchYqQmoQ6sE7ya3RfAyNBECqYmmtany8,5995 +binaryornot-0.4.4.dist-info/RECORD,, +binaryornot-0.4.4.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +binaryornot-0.4.4.dist-info/metadata.json,sha256=MDiTh918QrO8YioOU2Rjh-6NO2xehibwl5nSc0NJWWg,1022 +binaryornot-0.4.4.dist-info/top_level.txt,sha256=xSk7ScGP__GIh_D2caJfJk0oRzgpyyqjWiozi5_nvms,12 diff --git a/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/WHEEL b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/WHEEL new file mode 100644 index 0000000000000..8b6dd1b5a884b --- /dev/null +++ b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/metadata.json b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/metadata.json new file mode 100644 index 0000000000000..ac09fa3963c41 --- /dev/null +++ b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Natural Language :: English", "Programming 
Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6"], "extensions": {"python.details": {"contacts": [{"email": "aroy@alum.mit.edu", "name": "Audrey Roy Greenfeld", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/audreyr/binaryornot"}}}, "extras": [], "generator": "bdist_wheel (0.29.0)", "keywords": ["binaryornot"], "license": "BSD", "metadata_version": "2.0", "name": "binaryornot", "run_requires": [{"requires": ["chardet (>=3.0.2)"]}], "summary": "Ultra-lightweight pure Python package to check if a file is binary or text.", "version": "0.4.4"} \ No newline at end of file diff --git a/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/top_level.txt b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/top_level.txt new file mode 100644 index 0000000000000..62c9ba1d6a27f --- /dev/null +++ b/third_party/python/binaryornot/binaryornot-0.4.4.dist-info/top_level.txt @@ -0,0 +1 @@ +binaryornot diff --git a/third_party/python/binaryornot/binaryornot/__init__.py b/third_party/python/binaryornot/binaryornot/__init__.py new file mode 100644 index 0000000000000..518255b16b20d --- /dev/null +++ b/third_party/python/binaryornot/binaryornot/__init__.py @@ -0,0 +1,3 @@ +__author__ = 'Audrey Roy' +__email__ = 'audreyr@gmail.com' +__version__ = '0.4.4' diff --git a/third_party/python/binaryornot/binaryornot/check.py b/third_party/python/binaryornot/binaryornot/check.py new file mode 100644 index 0000000000000..a784e3a77f673 --- /dev/null +++ b/third_party/python/binaryornot/binaryornot/check.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +""" +binaryornot.check +----------------- + +Main code for checking if a file is binary or text. 
+""" + +import logging + +from .helpers import get_starting_chunk, is_binary_string + + +logger = logging.getLogger(__name__) + + +def is_binary(filename): + """ + :param filename: File to check. + :returns: True if it's a binary file, otherwise False. + """ + logger.debug('is_binary: %(filename)r', locals()) + + # Check if the file extension is in a list of known binary types + binary_extensions = ['.pyc', ] + for ext in binary_extensions: + if filename.endswith(ext): + return True + + # Check if the starting chunk is a binary string + chunk = get_starting_chunk(filename) + return is_binary_string(chunk) diff --git a/third_party/python/binaryornot/binaryornot/helpers.py b/third_party/python/binaryornot/binaryornot/helpers.py new file mode 100644 index 0000000000000..3f034a695aa76 --- /dev/null +++ b/third_party/python/binaryornot/binaryornot/helpers.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- + + +""" +binaryornot.helpers +------------------- + +Helper utilities used by BinaryOrNot. +""" + +import chardet +import logging + + +logger = logging.getLogger(__name__) + + +def print_as_hex(s): + """ + Print a string as hex bytes. + """ + print(":".join("{0:x}".format(ord(c)) for c in s)) + + +def get_starting_chunk(filename, length=1024): + """ + :param filename: File to open and get the first little chunk of. + :param length: Number of bytes to read, default 1024. + :returns: Starting chunk of bytes. 
+ """ + # Ensure we open the file in binary mode + try: + with open(filename, 'rb') as f: + chunk = f.read(length) + return chunk + except IOError as e: + print(e) + + +_control_chars = b'\n\r\t\f\b' +if bytes is str: + # Python 2 means we need to invoke chr() explicitly + _printable_ascii = _control_chars + b''.join(map(chr, range(32, 127))) + _printable_high_ascii = b''.join(map(chr, range(127, 256))) +else: + # Python 3 means bytes accepts integer input directly + _printable_ascii = _control_chars + bytes(range(32, 127)) + _printable_high_ascii = bytes(range(127, 256)) + + +def is_binary_string(bytes_to_check): + """ + Uses a simplified version of the Perl detection algorithm, + based roughly on Eli Bendersky's translation to Python: + http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/ + + This is biased slightly more in favour of deeming files as text + files than the Perl algorithm, since all ASCII compatible character + sets are accepted as text, not just utf-8. + + :param bytes: A chunk of bytes to check. + :returns: True if appears to be a binary, otherwise False. + """ + + # Empty files are considered text files + if not bytes_to_check: + return False + + # Now check for a high percentage of ASCII control characters + # Binary if control chars are > 30% of the string + low_chars = bytes_to_check.translate(None, _printable_ascii) + nontext_ratio1 = float(len(low_chars)) / float(len(bytes_to_check)) + logger.debug('nontext_ratio1: %(nontext_ratio1)r', locals()) + + # and check for a low percentage of high ASCII characters: + # Binary if high ASCII chars are < 5% of the string + # From: https://en.wikipedia.org/wiki/UTF-8 + # If the bytes are random, the chances of a byte with the high bit set + # starting a valid UTF-8 character is only 6.64%. The chances of finding 7 + # of these without finding an invalid sequence is actually lower than the + # chance of the first three bytes randomly being the UTF-8 BOM. 
+ + high_chars = bytes_to_check.translate(None, _printable_high_ascii) + nontext_ratio2 = float(len(high_chars)) / float(len(bytes_to_check)) + logger.debug('nontext_ratio2: %(nontext_ratio2)r', locals()) + + is_likely_binary = ( + (nontext_ratio1 > 0.3 and nontext_ratio2 < 0.05) or + (nontext_ratio1 > 0.8 and nontext_ratio2 > 0.8) + ) + logger.debug('is_likely_binary: %(is_likely_binary)r', locals()) + + # then check for binary for possible encoding detection with chardet + detected_encoding = chardet.detect(bytes_to_check) + logger.debug('detected_encoding: %(detected_encoding)r', locals()) + + # finally use all the check to decide binary or text + decodable_as_unicode = False + if (detected_encoding['confidence'] > 0.9 and + detected_encoding['encoding'] != 'ascii'): + try: + try: + bytes_to_check.decode(encoding=detected_encoding['encoding']) + except TypeError: + # happens only on Python 2.6 + unicode(bytes_to_check, encoding=detected_encoding['encoding']) # noqa + decodable_as_unicode = True + logger.debug('success: decodable_as_unicode: ' + '%(decodable_as_unicode)r', locals()) + except LookupError: + logger.debug('failure: could not look up encoding %(encoding)s', + detected_encoding) + except UnicodeDecodeError: + logger.debug('failure: decodable_as_unicode: ' + '%(decodable_as_unicode)r', locals()) + + logger.debug('failure: decodable_as_unicode: ' + '%(decodable_as_unicode)r', locals()) + if is_likely_binary: + if decodable_as_unicode: + return False + else: + return True + else: + if decodable_as_unicode: + return False + else: + if b'\x00' in bytes_to_check or b'\xff' in bytes_to_check: + # Check for NULL bytes last + logger.debug('has nulls:' + repr(b'\x00' in bytes_to_check)) + return True + return False diff --git a/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/AUTHORS.md b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/AUTHORS.md new file mode 100644 index 0000000000000..9e5a014943aef --- /dev/null +++ 
b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/AUTHORS.md @@ -0,0 +1,215 @@ +# Credits + +## Development Leads + +- Audrey Roy Greenfeld ([@audreyfeldroy](https://github.com/audreyfeldroy)) +- Daniel Roy Greenfeld ([@pydanny](https://github.com/pydanny)) +- Raphael Pierzina ([@hackebrot](https://github.com/hackebrot)) + +## Core Committers + +- Michael Joseph ([@michaeljoseph](https://github.com/michaeljoseph)) +- Paul Moore ([@pfmoore](https://github.com/pfmoore)) +- Andrey Shpak ([@insspb](https://github.com/insspb)) +- Sorin Sbarnea ([@ssbarnea](https://github.com/ssbarnea)) +- Fábio C. Barrionuevo da Luz ([@luzfcb](https://github.com/luzfcb)) +- Simone Basso ([@simobasso](https://github.com/simobasso)) +- Jens Klein ([@jensens](https://github.com/jensens)) +- Érico Andrei ([@ericof](https://github.com/ericof)) + +## Contributors + +- Steven Loria ([@sloria](https://github.com/sloria)) +- Goran Peretin ([@gperetin](https://github.com/gperetin)) +- Hamish Downer ([@foobacca](https://github.com/foobacca)) +- Thomas Orozco ([@krallin](https://github.com/krallin)) +- Jindrich Smitka ([@s-m-i-t-a](https://github.com/s-m-i-t-a)) +- Benjamin Schwarze ([@benjixx](https://github.com/benjixx)) +- Raphi ([@raphigaziano](https://github.com/raphigaziano)) +- Thomas Chiroux ([@ThomasChiroux](https://github.com/ThomasChiroux)) +- Sergi Almacellas Abellana ([@pokoli](https://github.com/pokoli)) +- Alex Gaynor ([@alex](https://github.com/alex)) +- Rolo ([@rolo](https://github.com/rolo)) +- Pablo ([@oubiga](https://github.com/oubiga)) +- Bruno Rocha ([@rochacbruno](https://github.com/rochacbruno)) +- Alexander Artemenko ([@svetlyak40wt](https://github.com/svetlyak40wt)) +- Mahmoud Abdelkader ([@mahmoudimus](https://github.com/mahmoudimus)) +- Leonardo Borges Avelino ([@lborgav](https://github.com/lborgav)) +- Chris Trotman ([@solarnz](https://github.com/solarnz)) +- Rolf ([@relekang](https://github.com/relekang)) +- Noah Kantrowitz 
([@coderanger](https://github.com/coderanger)) +- Vincent Bernat ([@vincentbernat](https://github.com/vincentbernat)) +- Germán Moya ([@pbacterio](https://github.com/pbacterio)) +- Ned Batchelder ([@nedbat](https://github.com/nedbat)) +- Dave Dash ([@davedash](https://github.com/davedash)) +- Johan Charpentier ([@cyberj](https://github.com/cyberj)) +- Éric Araujo ([@merwok](https://github.com/merwok)) +- saxix ([@saxix](https://github.com/saxix)) +- Tzu-ping Chung ([@uranusjr](https://github.com/uranusjr)) +- Caleb Hattingh ([@cjrh](https://github.com/cjrh)) +- Flavio Curella ([@fcurella](https://github.com/fcurella)) +- Adam Venturella ([@aventurella](https://github.com/aventurella)) +- Monty Taylor ([@emonty](https://github.com/emonty)) +- schacki ([@schacki](https://github.com/schacki)) +- Ryan Olson ([@ryanolson](https://github.com/ryanolson)) +- Trey Hunner ([@treyhunner](https://github.com/treyhunner)) +- Russell Keith-Magee ([@freakboy3742](https://github.com/freakboy3742)) +- Mishbah Razzaque ([@mishbahr](https://github.com/mishbahr)) +- Robin Andeer ([@robinandeer](https://github.com/robinandeer)) +- Rachel Sanders ([@trustrachel](https://github.com/trustrachel)) +- Rémy Hubscher ([@Natim](https://github.com/Natim)) +- Dino Petron3 ([@dinopetrone](https://github.com/dinopetrone)) +- Peter Inglesby ([@inglesp](https://github.com/inglesp)) +- Ramiro Batista da Luz ([@ramiroluz](https://github.com/ramiroluz)) +- Omer Katz ([@thedrow](https://github.com/thedrow)) +- lord63 ([@lord63](https://github.com/lord63)) +- Randy Syring ([@rsyring](https://github.com/rsyring)) +- Mark Jones ([@mark0978](https://github.com/mark0978)) +- Marc Abramowitz ([@msabramo](https://github.com/msabramo)) +- Lucian Ursu ([@LucianU](https://github.com/LucianU)) +- Osvaldo Santana Neto ([@osantana](https://github.com/osantana)) +- Matthias84 ([@Matthias84](https://github.com/Matthias84)) +- Simeon Visser ([@svisser](https://github.com/svisser)) +- Guruprasad 
([@lgp171188](https://github.com/lgp171188)) +- Charles-Axel Dein ([@charlax](https://github.com/charlax)) +- Diego Garcia ([@drgarcia1986](https://github.com/drgarcia1986)) +- maiksensi ([@maiksensi](https://github.com/maiksensi)) +- Andrew Conti ([@agconti](https://github.com/agconti)) +- Valentin Lab ([@vaab](https://github.com/vaab)) +- Ilja Bauer ([@iljabauer](https://github.com/iljabauer)) +- Elias Dorneles ([@eliasdorneles](https://github.com/eliasdorneles)) +- Matias Saguir ([@mativs](https://github.com/mativs)) +- Johannes ([@johtso](https://github.com/johtso)) +- macrotim ([@macrotim](https://github.com/macrotim)) +- Will McGinnis ([@wdm0006](https://github.com/wdm0006)) +- Cédric Krier ([@cedk](https://github.com/cedk)) +- Tim Osborn ([@ptim](https://github.com/ptim)) +- Aaron Gallagher ([@habnabit](https://github.com/habnabit)) +- mozillazg ([@mozillazg](https://github.com/mozillazg)) +- Joachim Jablon ([@ewjoachim](https://github.com/ewjoachim)) +- Andrew Ittner ([@tephyr](https://github.com/tephyr)) +- Diane DeMers Chen ([@purplediane](https://github.com/purplediane)) +- zzzirk ([@zzzirk](https://github.com/zzzirk)) +- Carol Willing ([@willingc](https://github.com/willingc)) +- phoebebauer ([@phoebebauer](https://github.com/phoebebauer)) +- Adam Chainz ([@adamchainz](https://github.com/adamchainz)) +- Sulé ([@suledev](https://github.com/suledev)) +- Evan Palmer ([@palmerev](https://github.com/palmerev)) +- Bruce Eckel ([@BruceEckel](https://github.com/BruceEckel)) +- Robert Lyon ([@ivanlyon](https://github.com/ivanlyon)) +- Terry Bates ([@terryjbates](https://github.com/terryjbates)) +- Brett Cannon ([@brettcannon](https://github.com/brettcannon)) +- Michael Warkentin ([@mwarkentin](https://github.com/mwarkentin)) +- Bartłomiej Kurzeja ([@B3QL](https://github.com/B3QL)) +- Thomas O'Donnell ([@andytom](https://github.com/andytom)) +- Jeremy Carbaugh ([@jcarbaugh](https://github.com/jcarbaugh)) +- Nathan Cheung ([@cheungnj](https://github.com/cheungnj)) 
+- Abdó Roig-Maranges ([@aroig](https://github.com/aroig)) +- Steve Piercy ([@stevepiercy](https://github.com/stevepiercy)) +- Corey ([@coreysnyder04](https://github.com/coreysnyder04)) +- Dmitry Evstratov ([@devstrat](https://github.com/devstrat)) +- Eyal Levin ([@eyalev](https://github.com/eyalev)) +- mathagician ([@mathagician](https://github.com/mathagician)) +- Guillaume Gelin ([@ramnes](https://github.com/ramnes)) +- @delirious-lettuce ([@delirious-lettuce](https://github.com/delirious-lettuce)) +- Gasper Vozel ([@karantan](https://github.com/karantan)) +- Joshua Carp ([@jmcarp](https://github.com/jmcarp)) +- @meahow ([@meahow](https://github.com/meahow)) +- Andrea Grandi ([@andreagrandi](https://github.com/andreagrandi)) +- Issa Jubril ([@jubrilissa](https://github.com/jubrilissa)) +- Nytiennzo Madooray ([@Nythiennzo](https://github.com/Nythiennzo)) +- Erik Bachorski ([@dornheimer](https://github.com/dornheimer)) +- cclauss ([@cclauss](https://github.com/cclauss)) +- Andy Craze ([@accraze](https://github.com/accraze)) +- Anthony Sottile ([@asottile](https://github.com/asottile)) +- Jonathan Sick ([@jonathansick](https://github.com/jonathansick)) +- Hugo ([@hugovk](https://github.com/hugovk)) +- Min ho Kim ([@minho42](https://github.com/minho42)) +- Ryan Ly ([@rly](https://github.com/rly)) +- Akintola Rahmat ([@mihrab34](https://github.com/mihrab34)) +- Jai Ram Rideout ([@jairideout](https://github.com/jairideout)) +- Diego Carrasco Gubernatis ([@dacog](https://github.com/dacog)) +- Wagner Negrão ([@wagnernegrao](https://github.com/wagnernegrao)) +- Josh Barnes ([@jcb91](https://github.com/jcb91)) +- Nikita Sobolev ([@sobolevn](https://github.com/sobolevn)) +- Matt Stibbs ([@mattstibbs](https://github.com/mattstibbs)) +- MinchinWeb ([@MinchinWeb](https://github.com/MinchinWeb)) +- kishan ([@kishan](https://github.com/kishan3)) +- tonytheleg ([@tonytheleg](https://github.com/tonytheleg)) +- Roman Hartmann ([@RomHartmann](https://github.com/RomHartmann)) +- 
DSEnvel ([@DSEnvel](https://github.com/DSEnvel)) +- kishan ([@kishan](https://github.com/kishan3)) +- Bruno Alla ([@browniebroke](https://github.com/browniebroke)) +- nicain ([@nicain](https://github.com/nicain)) +- Carsten Rösnick-Neugebauer ([@croesnick](https://github.com/croesnick)) +- igorbasko01 ([@igorbasko01](https://github.com/igorbasko01)) +- Dan Booth Dev ([@DanBoothDev](https://github.com/DanBoothDev)) +- Pablo Panero ([@ppanero](https://github.com/ppanero)) +- Chuan-Heng Hsiao ([@chhsiao1981](https://github.com/chhsiao1981)) +- Mohammad Hossein Sekhavat ([@mhsekhavat](https://github.com/mhsekhavat)) +- Amey Joshi ([@amey589](https://github.com/amey589)) +- Paul Harrison ([@smoothml](https://github.com/smoothml)) +- Fabio Todaro ([@SharpEdgeMarshall](https://github.com/SharpEdgeMarshall)) +- Nicholas Bollweg ([@bollwyvl](https://github.com/bollwyvl)) +- Jace Browning ([@jacebrowning](https://github.com/jacebrowning)) +- Ionel Cristian Mărieș ([@ionelmc](https://github.com/ionelmc)) +- Kishan Mehta ([@kishan3](https://github.com/kishan3)) +- Wieland Hoffmann ([@mineo](https://github.com/mineo)) +- Antony Lee ([@anntzer](https://github.com/anntzer)) +- Aurélien Gâteau ([@agateau](https://github.com/agateau)) +- Axel H. 
([@noirbizarre](https://github.com/noirbizarre)) +- Chris ([@chrisbrake](https://github.com/chrisbrake)) +- Chris Streeter ([@streeter](https://github.com/streeter)) +- Gábor Lipták ([@gliptak](https://github.com/gliptak)) +- Javier Sánchez Portero ([@javiersanp](https://github.com/javiersanp)) +- Nimrod Milo ([@milonimrod](https://github.com/milonimrod)) +- Philipp Kats ([@Casyfill](https://github.com/Casyfill)) +- Reinout van Rees ([@reinout](https://github.com/reinout)) +- Rémy Greinhofer ([@rgreinho](https://github.com/rgreinho)) +- Sebastian ([@sebix](https://github.com/sebix)) +- Stuart Mumford ([@Cadair](https://github.com/Cadair)) +- Tom Forbes ([@orf](https://github.com/orf)) +- Xie Yanbo ([@xyb](https://github.com/xyb)) +- Maxim Ivanov ([@ivanovmg](https://github.com/ivanovmg)) + +## Backers + +We would like to thank the following people for supporting us in our efforts to maintain and improve Cookiecutter: + +- Alex DeBrie +- Alexandre Y. Harano +- Bruno Alla +- Carol Willing +- Russell Keith-Magee + +## Sprint Contributors + +### PyCon 2016 Sprint + +The following people made contributions to the cookiecutter project at the PyCon sprints in Portland, OR from June 2-5 2016. +Contributions include user testing, debugging, improving documentation, reviewing issues, writing tutorials, creating and updating project templates, and teaching each other. 
+ +- Adam Chainz ([@adamchainz](https://github.com/adamchainz)) +- Andrew Ittner ([@tephyr](https://github.com/tephyr)) +- Audrey Roy Greenfeld ([@audreyr](https://github.com/audreyr)) +- Carol Willing ([@willingc](https://github.com/willingc)) +- Christopher Clarke ([@chrisdev](https://github.com/chrisdev)) +- Citlalli Murillo ([@citmusa](https://github.com/citmusa)) +- Daniel Roy Greenfeld ([@pydanny](https://github.com/pydanny)) +- Diane DeMers Chen ([@purplediane](https://github.com/purplediane)) +- Elaine Wong ([@elainewong](https://github.com/elainewong)) +- Elias Dorneles ([@eliasdorneles](https://github.com/eliasdorneles)) +- Emily Cain ([@emcain](https://github.com/emcain)) +- John Roa ([@jhonjairoroa87](https://github.com/jhonjairoroa87)) +- Jonan Scheffler ([@1337807](https://github.com/1337807)) +- Phoebe Bauer ([@phoebebauer](https://github.com/phoebebauer)) +- Kartik Sundararajan ([@skarbot](https://github.com/skarbot)) +- Katia Lira ([@katialira](https://github.com/katialira)) +- Leonardo Jimenez ([@xpostudio4](https://github.com/xpostudio4)) +- Lindsay Slazakowski ([@lslaz1](https://github.com/lslaz1)) +- Meghan Heintz ([@dot2dotseurat](https://github.com/dot2dotseurat)) +- Raphael Pierzina ([@hackebrot](https://github.com/hackebrot)) +- Umair Ashraf ([@umrashrf](https://github.com/umrashrf)) +- Valdir Stumm Junior ([@stummjr](https://github.com/stummjr)) +- Vivian Guillen ([@viviangb](https://github.com/viviangb)) +- Zaro ([@zaro0508](https://github.com/zaro0508)) diff --git a/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/LICENSE b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/LICENSE new file mode 100644 index 0000000000000..06486a8f39e4b --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/LICENSE @@ -0,0 +1,32 @@ +Copyright (c) 2013-2021, Audrey Roy Greenfeld +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following +conditions are met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +* Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/METADATA b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/METADATA new file mode 100644 index 0000000000000..43b7238d8f28b --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/METADATA @@ -0,0 +1,256 @@ +Metadata-Version: 2.1 +Name: cookiecutter +Version: 2.1.1 +Summary: A command-line utility that creates projects from project templates, e.g. creating a Python package project from a Python package project template. 
+Home-page: https://github.com/cookiecutter/cookiecutter +Author: Audrey Feldroy +Author-email: audreyr@gmail.com +License: BSD +Keywords: cookiecutter,Python,projects,project templates,Jinja2,skeleton,scaffolding,project directory,package,packaging +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: Natural Language :: English +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python +Classifier: Topic :: Software Development +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS.md +Requires-Dist: binaryornot (>=0.4.4) +Requires-Dist: Jinja2 (<4.0.0,>=2.7) +Requires-Dist: click (<9.0.0,>=7.0) +Requires-Dist: pyyaml (>=5.3.1) +Requires-Dist: jinja2-time (>=0.2.0) +Requires-Dist: python-slugify (>=4.0.0) +Requires-Dist: requests (>=2.23.0) + +# Cookiecutter + +[![pypi](https://img.shields.io/pypi/v/cookiecutter.svg)](https://pypi.org/project/cookiecutter/) +[![python](https://img.shields.io/pypi/pyversions/cookiecutter.svg)](https://pypi.org/project/cookiecutter/) +[![Build Status](https://github.com/cookiecutter/cookiecutter/actions/workflows/main.yml/badge.svg?branch=master)](https://github.com/cookiecutter/cookiecutter/actions) +[![codecov](https://codecov.io/gh/cookiecutter/cookiecutter/branch/master/graphs/badge.svg?branch=master)](https://codecov.io/github/cookiecutter/cookiecutter?branch=master) 
+[![discord](https://img.shields.io/badge/Discord-cookiecutter-5865F2?style=flat&logo=discord&logoColor=white)](https://discord.gg/9BrxzPKuEW) +[![docs](https://readthedocs.org/projects/cookiecutter/badge/?version=latest)](https://readthedocs.org/projects/cookiecutter/?badge=latest) +[![Code Quality](https://img.shields.io/scrutinizer/g/cookiecutter/cookiecutter.svg)](https://scrutinizer-ci.com/g/cookiecutter/cookiecutter/?branch=master) + +A command-line utility that creates projects from **cookiecutters** (project templates), e.g. creating a Python package project from a Python package project template. + +- Documentation: [https://cookiecutter.readthedocs.io](https://cookiecutter.readthedocs.io) +- GitHub: [https://github.com/cookiecutter/cookiecutter](https://github.com/cookiecutter/cookiecutter) +- PyPI: [https://pypi.org/project/cookiecutter/](https://pypi.org/project/cookiecutter/) +- Free and open source software: [BSD license](https://github.com/cookiecutter/cookiecutter/blob/master/LICENSE) + +![Cookiecutter](https://raw.githubusercontent.com/cookiecutter/cookiecutter/3ac078356adf5a1a72042dfe72ebfa4a9cd5ef38/logo/cookiecutter_medium.png) + +## Features + +- Cross-platform: Windows, Mac, and Linux are officially supported. +- You don't have to know/write Python code to use Cookiecutter. +- Works with Python 3.7, 3.8, 3.9., 3.10 +- Project templates can be in any programming language or markup format: + Python, JavaScript, Ruby, CoffeeScript, RST, Markdown, CSS, HTML, you name it. + You can use multiple languages in the same project template. + +### For users of existing templates + +- Simple command line usage: + + ```bash + # Create project from the cookiecutter-pypackage.git repo template + # You'll be prompted to enter values. + # Then it'll create your Python package in the current working directory, + # based on those values. 
+ $ cookiecutter https://github.com/audreyfeldroy/cookiecutter-pypackage + # For the sake of brevity, repos on GitHub can just use the 'gh' prefix + $ cookiecutter gh:audreyfeldroy/cookiecutter-pypackage + ``` + +- Use it at the command line with a local template: + + ```bash + # Create project in the current working directory, from the local + # cookiecutter-pypackage/ template + $ cookiecutter cookiecutter-pypackage/ + ``` + +- Or use it from Python: + + ```py + from cookiecutter.main import cookiecutter + + # Create project from the cookiecutter-pypackage/ template + cookiecutter('cookiecutter-pypackage/') + + # Create project from the cookiecutter-pypackage.git repo template + cookiecutter('https://github.com/audreyfeldroy/cookiecutter-pypackage.git') + ``` + +- Unless you suppress it with `--no-input`, you are prompted for input: + - Prompts are the keys in `cookiecutter.json`. + - Default responses are the values in `cookiecutter.json`. + - Prompts are shown in order. +- Cross-platform support for `~/.cookiecutterrc` files: + + ```yaml + default_context: + full_name: "Audrey Roy Greenfeld" + email: "audreyr@gmail.com" + github_username: "audreyfeldroy" + cookiecutters_dir: "~/.cookiecutters/" + ``` + +- Cookiecutters (cloned Cookiecutter project templates) are put into `~/.cookiecutters/` by default, or cookiecutters_dir if specified. +- If you have already cloned a cookiecutter into `~/.cookiecutters/`, you can reference it by directory name: + + ```bash + # Clone cookiecutter-pypackage + $ cookiecutter gh:audreyfeldroy/cookiecutter-pypackage + # Now you can use the already cloned cookiecutter by name + $ cookiecutter cookiecutter-pypackage + ``` + +- You can use local cookiecutters, or remote cookiecutters directly from Git repos or from Mercurial repos on Bitbucket. +- Default context: specify key/value pairs that you want used as defaults whenever you generate a project. 
+- Inject extra context with command-line arguments: + + ```bash + cookiecutter --no-input gh:msabramo/cookiecutter-supervisor program_name=foobar startsecs=10 + ``` + +- Direct access to the Cookiecutter API allows for injection of extra context. +- Paths to local projects can be specified as absolute or relative. +- Projects generated to your current directory or to target directory if specified with `-o` option. + +### For template creators + +- Supports unlimited levels of directory nesting. +- 100% of templating is done with Jinja2. +- Both, directory names and filenames can be templated. + For example: + + ```py + {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}.py + ``` +- Simply define your template variables in a `cookiecutter.json` file. + For example: + + ```json + { + "full_name": "Audrey Roy Greenfeld", + "email": "audreyr@gmail.com", + "project_name": "Complexity", + "repo_name": "complexity", + "project_short_description": "Refreshingly simple static site generator.", + "release_date": "2013-07-10", + "year": "2013", + "version": "0.1.1" + } + ``` +- Pre- and post-generate hooks: Python or shell scripts to run before or after generating a project. + +## Available Cookiecutters + +Making great cookies takes a lot of cookiecutters and contributors. +We're so pleased that there are many Cookiecutter project templates to choose from. +We hope you find a cookiecutter that is just right for your needs. + +### A Pantry Full of Cookiecutters + +The best place to start searching for specific and ready to use cookiecutter template is [Github search](https://github.com/search?q=cookiecutter&type=Repositories). +Just type `cookiecutter` and you will discover over 4000 related repositories. + +We also recommend you to check related GitHub topics. +For general search use [cookiecutter-template](https://github.com/topics/cookiecutter-template). 
+For specific topics try to use `cookiecutter-yourtopic`, like `cookiecutter-python` or `cookiecutter-datascience`. +This is a new GitHub feature, so not all active repositories use it at the moment. + +If you are template developer please add related [topics](https://help.github.com/en/github/administering-a-repository/classifying-your-repository-with-topics) with `cookiecutter` prefix to you repository. +We believe it will make it more discoverable. +You are almost not limited in topics amount, use it! + +### Cookiecutter Specials + +These Cookiecutters are maintained by the cookiecutter team: + +- [cookiecutter-pypackage](https://github.com/audreyfeldroy/cookiecutter-pypackage): + ultimate Python package project template by [@audreyfeldroy's](https://github.com/audreyfeldroy). +- [cookiecutter-django](https://github.com/pydanny/cookiecutter-django): + a framework for jumpstarting production-ready Django projects quickly. + It is bleeding edge with Bootstrap 5, customizable users app, starter templates, working user registration, celery setup, and much more. +- [cookiecutter-pytest-plugin](https://github.com/pytest-dev/cookiecutter-pytest-plugin): + Minimal Cookiecutter template for authoring [pytest](https://docs.pytest.org/) plugins that help you to write better programs. + +## Community + +The core committer team can be found in [authors section](AUTHORS.md). +We are always welcome and invite you to participate. + +Stuck? Try one of the following: + +- See the [Troubleshooting](https://cookiecutter.readthedocs.io/en/latest/troubleshooting.html) page. +- Ask for help on [Stack Overflow](https://stackoverflow.com/questions/tagged/cookiecutter). +- You are strongly encouraged to [file an issue](https://github.com/cookiecutter/cookiecutter/issues?q=is%3Aopen) about the problem. + Do it even if it's just "I can't get it to work on this cookiecutter" with a link to your cookiecutter. + Don't worry about naming/pinpointing the issue properly. 
+- Ask for help on [Discord](https://discord.gg/9BrxzPKuEW) if you must (but please try one of the other options first, so that others can benefit from the discussion). + +Development on Cookiecutter is community-driven: + +- Huge thanks to all the [contributors](AUTHORS.md) who have pitched in to help make Cookiecutter an even better tool. +- Everyone is invited to contribute. + Read the [contributing instructions](CONTRIBUTING.md), then get started. +- Connect with other Cookiecutter contributors and users on [Discord](https://discord.gg/9BrxzPKuEW) + (note: due to work and other commitments, a core committer might not always be available) + +Encouragement is unbelievably motivating. +If you want more work done on Cookiecutter, show support: + +- Thank a core committer for their efforts. +- Star [Cookiecutter on GitHub](https://github.com/cookiecutter/cookiecutter). +- [Support this project](#support-this-project) + +Got criticism or complaints? + +- [File an issue](https://github.com/cookiecutter/cookiecutter/issues?q=is%3Aopen) so that Cookiecutter can be improved. + Be friendly and constructive about what could be better. + Make detailed suggestions. +- **Keep us in the loop so that we can help.** + For example, if you are discussing problems with Cookiecutter on a mailing list, [file an issue](https://github.com/cookiecutter/cookiecutter/issues?q=is%3Aopen) where you link to the discussion thread and/or cc at least 1 core committer on the email. +- Be encouraging. + A comment like "This function ought to be rewritten like this" is much more likely to result in action than a comment like "Eww, look how bad this function is." + +Waiting for a response to an issue/question? + +- Be patient and persistent. All issues are on the core committer team's radar and will be considered thoughtfully, but we have a lot of issues to work through. + If urgent, it's fine to ping a core committer in the issue with a reminder. +- Ask others to comment, discuss, review, etc. 
+- Search the Cookiecutter repo for issues related to yours. +- Need a fix/feature/release/help urgently, and can't wait? + [@audreyfeldroy](https://github.com/audreyfeldroy) is available for hire for consultation or custom development. + +## Support This Project + +This project is run by volunteers. +Shortly we will be providing means for organizations and individuals to support the project. + +## Code of Conduct + +Everyone interacting in the Cookiecutter project's codebases and documentation is expected to follow the [PyPA Code of Conduct](https://www.pypa.io/en/latest/code-of-conduct/). +This includes, but is not limited to, issue trackers, chat rooms, mailing lists, and other virtual or in real life communication. + +## Creator / Leader + +This project was created and is led by [Audrey Roy Greenfeld](https://github.com/audreyfeldroy). + +She is supported by a team of maintainers. diff --git a/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/RECORD b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/RECORD new file mode 100644 index 0000000000000..e9bbcd08c4056 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/RECORD @@ -0,0 +1,25 @@ +cookiecutter/__init__.py,sha256=mvVTrlACc_h48704u9mn8R7qybdzRDm3kUJYaag2U5I,59 +cookiecutter/__main__.py,sha256=0i3swGdJG0xGGydZ8oVXMVv17yQm3kHzRaP35B6uEas,194 +cookiecutter/cli.py,sha256=CWBPBcFBe6P-JqKymRLGmtOU9SBed_gso7nsffi_uCw,6926 +cookiecutter/config.py,sha256=Sy3a9nrybICBvI5YLYS2yUdA2JEbrBJhWUGB2RIDWZ8,4250 +cookiecutter/environment.py,sha256=nCEeEc8puQlMJsU6OGtWzj2QIwbR_cSLY07Z9gmOEHQ,2259 +cookiecutter/exceptions.py,sha256=ri744cAhzcMR86NCzbqq0QzpKZ5Mrsq0Hhl0_6sg8vg,3886 +cookiecutter/extensions.py,sha256=FrxrIxgYnnXTMa8hy38n6E5b9WS70PdpkJlLjip_hoI,1861 +cookiecutter/find.py,sha256=L1JE51TWguZPqAGteLpDsnp3AZVJpVa1Cjl7j5tuYoQ,1008 +cookiecutter/generate.py,sha256=vA6q8IOgrXLrUj2LZAH4vSL8SJXtZdYK8FPVmT56EPw,14827 
+cookiecutter/hooks.py,sha256=eT_wRfWXBhiSfMhHerrx8ACAMeexu91fbqtk7NE1oI8,4227 +cookiecutter/log.py,sha256=4KwD0yjS5jGK17pJm94jYbJDib0r_hhd_1bdNN-C5y0,1568 +cookiecutter/main.py,sha256=XA9GKJbrDCaVDivkGkBLuFbTS7rqDTWIRnrEDcwb22c,4657 +cookiecutter/prompt.py,sha256=jipyemmYF-HE3K0cmkrycoZ8TFUhJpIYMr6dxhErvg8,8197 +cookiecutter/replay.py,sha256=D2vKyMfMbZ1So0IrIk8KWzYj5l0Jbvr9yQoIO_1wb4Y,1512 +cookiecutter/repository.py,sha256=C8jk4OhGc1ldCjdZ5IXHMSJyaK-AX9GD0-jXOcPGv3g,4206 +cookiecutter/utils.py,sha256=wYctIUKvuh7yEBaBaNu_H_ohNIOlE-e8FYCIbemgARw,3136 +cookiecutter/vcs.py,sha256=RNb_L_pRbezB3tlvEVwBBxDiW7yLPJ1ku978V1BFFzc,4184 +cookiecutter/zipfile.py,sha256=uFvUACBImliMpFfZDOfly0FAGggikvJQzjsc6li4KKc,4264 +cookiecutter-2.1.1.dist-info/AUTHORS.md,sha256=wr92S5G373_A1QbLpGehSS6AqWMqz_GMbNSCRVAzxaU,11784 +cookiecutter-2.1.1.dist-info/LICENSE,sha256=iPRc_2ncuesJXa8jA4O-ytRyYZSrjJsEHURe72StH2s,1493 +cookiecutter-2.1.1.dist-info/METADATA,sha256=iSZkpbgaptuhc1KYr2okLKP5M0alCGPxalF7GXOUleU,12593 +cookiecutter-2.1.1.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +cookiecutter-2.1.1.dist-info/entry_points.txt,sha256=NzYmXG0J4ML4EmM5POUl9I5PxNN1qlDVgOtkjVR8Fng,60 +cookiecutter-2.1.1.dist-info/top_level.txt,sha256=UE0NGj4iqLNgC-5CAY4V94Tqp9mAD8HqwvZpG9z6cGY,13 +cookiecutter-2.1.1.dist-info/RECORD,, diff --git a/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/WHEEL b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/WHEEL new file mode 100644 index 0000000000000..0b18a281107a0 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/entry_points.txt b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000..a1a3da0ad20eb --- /dev/null 
+++ b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +cookiecutter = cookiecutter.__main__:main diff --git a/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/top_level.txt b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/top_level.txt new file mode 100644 index 0000000000000..c8e988bc24cdf --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter-2.1.1.dist-info/top_level.txt @@ -0,0 +1 @@ +cookiecutter diff --git a/third_party/python/cookiecutter/cookiecutter/__init__.py b/third_party/python/cookiecutter/cookiecutter/__init__.py new file mode 100644 index 0000000000000..f0e3a2c38d8f1 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/__init__.py @@ -0,0 +1,2 @@ +"""Main package for Cookiecutter.""" +__version__ = "2.1.1" diff --git a/third_party/python/cookiecutter/cookiecutter/__main__.py b/third_party/python/cookiecutter/cookiecutter/__main__.py new file mode 100644 index 0000000000000..9ac3661726188 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/__main__.py @@ -0,0 +1,6 @@ +"""Allow cookiecutter to be executable through `python -m cookiecutter`.""" +from cookiecutter.cli import main + + +if __name__ == "__main__": # pragma: no cover + main(prog_name="cookiecutter") diff --git a/third_party/python/cookiecutter/cookiecutter/cli.py b/third_party/python/cookiecutter/cookiecutter/cli.py new file mode 100644 index 0000000000000..a792fa5f568f4 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/cli.py @@ -0,0 +1,231 @@ +"""Main `cookiecutter` CLI.""" +import collections +import json +import os +import sys + +import click + +from cookiecutter import __version__ +from cookiecutter.exceptions import ( + ContextDecodingException, + FailedHookException, + InvalidModeException, + InvalidZipRepository, + OutputDirExistsException, + RepositoryCloneFailed, + RepositoryNotFound, + UndefinedVariableInTemplate, + UnknownExtension, +) 
+from cookiecutter.log import configure_logger +from cookiecutter.main import cookiecutter +from cookiecutter.config import get_user_config + + +def version_msg(): + """Return the Cookiecutter version, location and Python powering it.""" + python_version = sys.version + location = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + message = 'Cookiecutter %(version)s from {} (Python {})' + return message.format(location, python_version) + + +def validate_extra_context(ctx, param, value): + """Validate extra context.""" + for s in value: + if '=' not in s: + raise click.BadParameter( + 'EXTRA_CONTEXT should contain items of the form key=value; ' + "'{}' doesn't match that form".format(s) + ) + + # Convert tuple -- e.g.: ('program_name=foobar', 'startsecs=66') + # to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'} + return collections.OrderedDict(s.split('=', 1) for s in value) or None + + +def list_installed_templates(default_config, passed_config_file): + """List installed (locally cloned) templates. Use cookiecutter --list-installed.""" + config = get_user_config(passed_config_file, default_config) + cookiecutter_folder = config.get('cookiecutters_dir') + if not os.path.exists(cookiecutter_folder): + click.echo( + 'Error: Cannot list installed templates. 
Folder does not exist: ' + '{}'.format(cookiecutter_folder) + ) + sys.exit(-1) + + template_names = [ + folder + for folder in os.listdir(cookiecutter_folder) + if os.path.exists( + os.path.join(cookiecutter_folder, folder, 'cookiecutter.json') + ) + ] + click.echo(f'{len(template_names)} installed templates: ') + for name in template_names: + click.echo(f' * {name}') + + +@click.command(context_settings=dict(help_option_names=['-h', '--help'])) +@click.version_option(__version__, '-V', '--version', message=version_msg()) +@click.argument('template', required=False) +@click.argument('extra_context', nargs=-1, callback=validate_extra_context) +@click.option( + '--no-input', + is_flag=True, + help='Do not prompt for parameters and only use cookiecutter.json file content', +) +@click.option( + '-c', + '--checkout', + help='branch, tag or commit to checkout after git clone', +) +@click.option( + '--directory', + help='Directory within repo that holds cookiecutter.json file ' + 'for advanced repositories with multi templates in it', +) +@click.option( + '-v', '--verbose', is_flag=True, help='Print debug information', default=False +) +@click.option( + '--replay', + is_flag=True, + help='Do not prompt for parameters and only use information entered previously', +) +@click.option( + '--replay-file', + type=click.Path(), + default=None, + help='Use this file for replay instead of the default.', +) +@click.option( + '-f', + '--overwrite-if-exists', + is_flag=True, + help='Overwrite the contents of the output directory if it already exists', +) +@click.option( + '-s', + '--skip-if-file-exists', + is_flag=True, + help='Skip the files in the corresponding directories if they already exist', + default=False, +) +@click.option( + '-o', + '--output-dir', + default='.', + type=click.Path(), + help='Where to output the generated project dir into', +) +@click.option( + '--config-file', type=click.Path(), default=None, help='User configuration file' +) +@click.option( + 
'--default-config', + is_flag=True, + help='Do not load a config file. Use the defaults instead', +) +@click.option( + '--debug-file', + type=click.Path(), + default=None, + help='File to be used as a stream for DEBUG logging', +) +@click.option( + '--accept-hooks', + type=click.Choice(['yes', 'ask', 'no']), + default='yes', + help='Accept pre/post hooks', +) +@click.option( + '-l', '--list-installed', is_flag=True, help='List currently installed templates.' +) +def main( + template, + extra_context, + no_input, + checkout, + verbose, + replay, + overwrite_if_exists, + output_dir, + config_file, + default_config, + debug_file, + directory, + skip_if_file_exists, + accept_hooks, + replay_file, + list_installed, +): + """Create a project from a Cookiecutter project template (TEMPLATE). + + Cookiecutter is free and open source software, developed and managed by + volunteers. If you would like to help out or fund the project, please get + in touch at https://github.com/cookiecutter/cookiecutter. + """ + # Commands that should work without arguments + if list_installed: + list_installed_templates(default_config, config_file) + sys.exit(0) + + # Raising usage, after all commands that should work without args. + if not template or template.lower() == 'help': + click.echo(click.get_current_context().get_help()) + sys.exit(0) + + configure_logger(stream_level='DEBUG' if verbose else 'INFO', debug_file=debug_file) + + # If needed, prompt the user to ask whether or not they want to execute + # the pre/post hooks. 
+ if accept_hooks == "ask": + _accept_hooks = click.confirm("Do you want to execute hooks?") + else: + _accept_hooks = accept_hooks == "yes" + + if replay_file: + replay = replay_file + + try: + cookiecutter( + template, + checkout, + no_input, + extra_context=extra_context, + replay=replay, + overwrite_if_exists=overwrite_if_exists, + output_dir=output_dir, + config_file=config_file, + default_config=default_config, + password=os.environ.get('COOKIECUTTER_REPO_PASSWORD'), + directory=directory, + skip_if_file_exists=skip_if_file_exists, + accept_hooks=_accept_hooks, + ) + except ( + ContextDecodingException, + OutputDirExistsException, + InvalidModeException, + FailedHookException, + UnknownExtension, + InvalidZipRepository, + RepositoryNotFound, + RepositoryCloneFailed, + ) as e: + click.echo(e) + sys.exit(1) + except UndefinedVariableInTemplate as undefined_err: + click.echo(f'{undefined_err.message}') + click.echo(f'Error message: {undefined_err.error.message}') + + context_str = json.dumps(undefined_err.context, indent=4, sort_keys=True) + click.echo(f'Context: {context_str}') + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/third_party/python/cookiecutter/cookiecutter/config.py b/third_party/python/cookiecutter/cookiecutter/config.py new file mode 100644 index 0000000000000..0d0fa8c7e1451 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/config.py @@ -0,0 +1,122 @@ +"""Global configuration handling.""" +import collections +import copy +import logging +import os + +import yaml + +from cookiecutter.exceptions import ConfigDoesNotExistException, InvalidConfiguration + +logger = logging.getLogger(__name__) + +USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc') + +BUILTIN_ABBREVIATIONS = { + 'gh': 'https://github.com/{0}.git', + 'gl': 'https://gitlab.com/{0}.git', + 'bb': 'https://bitbucket.org/{0}', +} + +DEFAULT_CONFIG = { + 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'), + 'replay_dir': 
os.path.expanduser('~/.cookiecutter_replay/'), + 'default_context': collections.OrderedDict([]), + 'abbreviations': BUILTIN_ABBREVIATIONS, +} + + +def _expand_path(path): + """Expand both environment variables and user home in the given path.""" + path = os.path.expandvars(path) + path = os.path.expanduser(path) + return path + + +def merge_configs(default, overwrite): + """Recursively update a dict with the key/value pair of another. + + Dict values that are dictionaries themselves will be updated, whilst + preserving existing keys. + """ + new_config = copy.deepcopy(default) + + for k, v in overwrite.items(): + # Make sure to preserve existing items in + # nested dicts, for example `abbreviations` + if isinstance(v, dict): + new_config[k] = merge_configs(default.get(k, {}), v) + else: + new_config[k] = v + + return new_config + + +def get_config(config_path): + """Retrieve the config from the specified path, returning a config dict.""" + if not os.path.exists(config_path): + raise ConfigDoesNotExistException(f'Config file {config_path} does not exist.') + + logger.debug('config_path is %s', config_path) + with open(config_path, encoding='utf-8') as file_handle: + try: + yaml_dict = yaml.safe_load(file_handle) + except yaml.YAMLError as e: + raise InvalidConfiguration( + f'Unable to parse YAML file {config_path}.' + ) from e + + config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict) + + raw_replay_dir = config_dict['replay_dir'] + config_dict['replay_dir'] = _expand_path(raw_replay_dir) + + raw_cookies_dir = config_dict['cookiecutters_dir'] + config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir) + + return config_dict + + +def get_user_config(config_file=None, default_config=False): + """Return the user config as a dict. + + If ``default_config`` is True, ignore ``config_file`` and return default + values for the config parameters. + + If a path to a ``config_file`` is given, that is different from the default + location, load the user config from that. 
+ + Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG`` + environment variable. If set, load the config from this path. This will + raise an error if the specified path is not valid. + + If the environment variable is not set, try the default config file path + before falling back to the default config values. + """ + # Do NOT load a config. Return defaults instead. + if default_config: + logger.debug("Force ignoring user config with default_config switch.") + return copy.copy(DEFAULT_CONFIG) + + # Load the given config file + if config_file and config_file is not USER_CONFIG_PATH: + logger.debug("Loading custom config from %s.", config_file) + return get_config(config_file) + + try: + # Does the user set up a config environment variable? + env_config_file = os.environ['COOKIECUTTER_CONFIG'] + except KeyError: + # Load an optional user config if it exists + # otherwise return the defaults + if os.path.exists(USER_CONFIG_PATH): + logger.debug("Loading config from %s.", USER_CONFIG_PATH) + return get_config(USER_CONFIG_PATH) + else: + logger.debug("User config not found. Loading default config.") + return copy.copy(DEFAULT_CONFIG) + else: + # There is a config environment variable. Try to load it. + # Do not check for existence, so invalid file paths raise an error. + logger.debug("User config not found or not specified. Loading default config.") + return get_config(env_config_file) diff --git a/third_party/python/cookiecutter/cookiecutter/environment.py b/third_party/python/cookiecutter/cookiecutter/environment.py new file mode 100644 index 0000000000000..f2804c59508e0 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/environment.py @@ -0,0 +1,65 @@ +"""Jinja2 environment and extensions loading.""" +from jinja2 import Environment, StrictUndefined + +from cookiecutter.exceptions import UnknownExtension + + +class ExtensionLoaderMixin: + """Mixin providing sane loading of extensions specified in a given context. 
+ + The context is being extracted from the keyword arguments before calling + the next parent class in line of the child. + """ + + def __init__(self, **kwargs): + """Initialize the Jinja2 Environment object while loading extensions. + + Does the following: + + 1. Establishes default_extensions (currently just a Time feature) + 2. Reads extensions set in the cookiecutter.json _extensions key. + 3. Attempts to load the extensions. Provides useful error if fails. + """ + context = kwargs.pop('context', {}) + + default_extensions = [ + 'cookiecutter.extensions.JsonifyExtension', + 'cookiecutter.extensions.RandomStringExtension', + 'cookiecutter.extensions.SlugifyExtension', + 'cookiecutter.extensions.UUIDExtension', + 'jinja2_time.TimeExtension', + ] + extensions = default_extensions + self._read_extensions(context) + + try: + super().__init__(extensions=extensions, **kwargs) + except ImportError as err: + raise UnknownExtension(f'Unable to load extension: {err}') + + def _read_extensions(self, context): + """Return list of extensions as str to be passed on to the Jinja2 env. + + If context does not contain the relevant info, return an empty + list instead. + """ + try: + extensions = context['cookiecutter']['_extensions'] + except KeyError: + return [] + else: + return [str(ext) for ext in extensions] + + +class StrictEnvironment(ExtensionLoaderMixin, Environment): + """Create strict Jinja2 environment. + + Jinja2 environment will raise error on undefined variable in template- + rendering context. + """ + + def __init__(self, **kwargs): + """Set the standard Cookiecutter StrictEnvironment. + + Also loading extensions defined in cookiecutter.json's _extensions key. 
+ """ + super().__init__(undefined=StrictUndefined, **kwargs) diff --git a/third_party/python/cookiecutter/cookiecutter/exceptions.py b/third_party/python/cookiecutter/cookiecutter/exceptions.py new file mode 100644 index 0000000000000..4acf6dc47c0a8 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/exceptions.py @@ -0,0 +1,163 @@ +"""All exceptions used in the Cookiecutter code base are defined here.""" + + +class CookiecutterException(Exception): + """ + Base exception class. + + All Cookiecutter-specific exceptions should subclass this class. + """ + + +class NonTemplatedInputDirException(CookiecutterException): + """ + Exception for when a project's input dir is not templated. + + The name of the input directory should always contain a string that is + rendered to something else, so that input_dir != output_dir. + """ + + +class UnknownTemplateDirException(CookiecutterException): + """ + Exception for ambiguous project template directory. + + Raised when Cookiecutter cannot determine which directory is the project + template, e.g. more than one dir appears to be a template dir. + """ + + # unused locally + + +class MissingProjectDir(CookiecutterException): + """ + Exception for missing generated project directory. + + Raised during cleanup when remove_repo() can't find a generated project + directory inside of a repo. + """ + + # unused locally + + +class ConfigDoesNotExistException(CookiecutterException): + """ + Exception for missing config file. + + Raised when get_config() is passed a path to a config file, but no file + is found at that path. + """ + + +class InvalidConfiguration(CookiecutterException): + """ + Exception for invalid configuration file. + + Raised if the global configuration file is not valid YAML or is + badly constructed. + """ + + +class UnknownRepoType(CookiecutterException): + """ + Exception for unknown repo types. + + Raised if a repo's type cannot be determined. 
+ """ + + +class VCSNotInstalled(CookiecutterException): + """ + Exception when version control is unavailable. + + Raised if the version control system (git or hg) is not installed. + """ + + +class ContextDecodingException(CookiecutterException): + """ + Exception for failed JSON decoding. + + Raised when a project's JSON context file can not be decoded. + """ + + +class OutputDirExistsException(CookiecutterException): + """ + Exception for existing output directory. + + Raised when the output directory of the project exists already. + """ + + +class InvalidModeException(CookiecutterException): + """ + Exception for incompatible modes. + + Raised when cookiecutter is called with both `no_input==True` and + `replay==True` at the same time. + """ + + +class FailedHookException(CookiecutterException): + """ + Exception for hook failures. + + Raised when a hook script fails. + """ + + +class UndefinedVariableInTemplate(CookiecutterException): + """ + Exception for out-of-scope variables. + + Raised when a template uses a variable which is not defined in the + context. + """ + + def __init__(self, message, error, context): + """Exception for out-of-scope variables.""" + self.message = message + self.error = error + self.context = context + + def __str__(self): + """Text representation of UndefinedVariableInTemplate.""" + return ( + f"{self.message}. " + f"Error message: {self.error.message}. " + f"Context: {self.context}" + ) + + +class UnknownExtension(CookiecutterException): + """ + Exception for un-importable extention. + + Raised when an environment is unable to import a required extension. + """ + + +class RepositoryNotFound(CookiecutterException): + """ + Exception for missing repo. + + Raised when the specified cookiecutter repository doesn't exist. + """ + + +class RepositoryCloneFailed(CookiecutterException): + """ + Exception for un-cloneable repo. + + Raised when a cookiecutter template can't be cloned. 
+ """ + + +class InvalidZipRepository(CookiecutterException): + """ + Exception for bad zip repo. + + Raised when the specified cookiecutter repository isn't a valid + Zip archive. + """ diff --git a/third_party/python/cookiecutter/cookiecutter/extensions.py b/third_party/python/cookiecutter/cookiecutter/extensions.py new file mode 100644 index 0000000000000..6a3161abab802 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/extensions.py @@ -0,0 +1,66 @@ +"""Jinja2 extensions.""" +import json +import string +import uuid +from secrets import choice + +from jinja2.ext import Extension +from slugify import slugify as pyslugify + + +class JsonifyExtension(Extension): + """Jinja2 extension to convert a Python object to JSON.""" + + def __init__(self, environment): + """Initialize the extension with the given environment.""" + super().__init__(environment) + + def jsonify(obj): + return json.dumps(obj, sort_keys=True, indent=4) + + environment.filters['jsonify'] = jsonify + + +class RandomStringExtension(Extension): + """Jinja2 extension to create a random string.""" + + def __init__(self, environment): + """Jinja2 Extension Constructor.""" + super().__init__(environment) + + def random_ascii_string(length, punctuation=False): + if punctuation: + corpus = "".join((string.ascii_letters, string.punctuation)) + else: + corpus = string.ascii_letters + return "".join(choice(corpus) for _ in range(length)) + + environment.globals.update(random_ascii_string=random_ascii_string) + + +class SlugifyExtension(Extension): + """Jinja2 Extension to slugify string.""" + + def __init__(self, environment): + """Jinja2 Extension constructor.""" + super().__init__(environment) + + def slugify(value, **kwargs): + """Slugifies the value.""" + return pyslugify(value, **kwargs) + + environment.filters['slugify'] = slugify + + +class UUIDExtension(Extension): + """Jinja2 Extension to generate uuid4 string.""" + + def __init__(self, environment): + """Jinja2 Extension 
constructor.""" + super().__init__(environment) + + def uuid4(): + """Generate UUID4.""" + return str(uuid.uuid4()) + + environment.globals.update(uuid4=uuid4) diff --git a/third_party/python/cookiecutter/cookiecutter/find.py b/third_party/python/cookiecutter/cookiecutter/find.py new file mode 100644 index 0000000000000..054e286f4aad4 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/find.py @@ -0,0 +1,31 @@ +"""Functions for finding Cookiecutter templates and other components.""" +import logging +import os + +from cookiecutter.exceptions import NonTemplatedInputDirException + +logger = logging.getLogger(__name__) + + +def find_template(repo_dir): + """Determine which child directory of `repo_dir` is the project template. + + :param repo_dir: Local directory of newly cloned repo. + :returns project_template: Relative path to project template. + """ + logger.debug('Searching %s for the project template.', repo_dir) + + repo_dir_contents = os.listdir(repo_dir) + + project_template = None + for item in repo_dir_contents: + if 'cookiecutter' in item and '{{' in item and '}}' in item: + project_template = item + break + + if project_template: + project_template = os.path.join(repo_dir, project_template) + logger.debug('The project template appears to be %s', project_template) + return project_template + else: + raise NonTemplatedInputDirException diff --git a/third_party/python/cookiecutter/cookiecutter/generate.py b/third_party/python/cookiecutter/cookiecutter/generate.py new file mode 100644 index 0000000000000..7bdce5a8bbef6 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/generate.py @@ -0,0 +1,391 @@ +"""Functions for generating a project from a project template.""" +import fnmatch +import json +import logging +import os +import shutil +import warnings +from collections import OrderedDict + +from binaryornot.check import is_binary +from jinja2 import FileSystemLoader +from jinja2.exceptions import TemplateSyntaxError, UndefinedError + 
+from cookiecutter.environment import StrictEnvironment +from cookiecutter.exceptions import ( + ContextDecodingException, + FailedHookException, + NonTemplatedInputDirException, + OutputDirExistsException, + UndefinedVariableInTemplate, +) +from cookiecutter.find import find_template +from cookiecutter.hooks import run_hook +from cookiecutter.utils import make_sure_path_exists, rmtree, work_in + +logger = logging.getLogger(__name__) + + +def is_copy_only_path(path, context): + """Check whether the given `path` should only be copied and not rendered. + + Returns True if `path` matches a pattern in the given `context` dict, + otherwise False. + + :param path: A file-system path referring to a file or dir that + should be rendered or just copied. + :param context: cookiecutter context. + """ + try: + for dont_render in context['cookiecutter']['_copy_without_render']: + if fnmatch.fnmatch(path, dont_render): + return True + except KeyError: + return False + + return False + + +def apply_overwrites_to_context(context, overwrite_context): + """Modify the given context in place based on the overwrite_context.""" + for variable, overwrite in overwrite_context.items(): + if variable not in context: + # Do not include variables which are not used in the template + continue + + context_value = context[variable] + + if isinstance(context_value, list): + # We are dealing with a choice variable + if overwrite in context_value: + # This overwrite is actually valid for the given context + # Let's set it as default (by definition first item in list) + # see ``cookiecutter.prompt.prompt_choice_for_config`` + context_value.remove(overwrite) + context_value.insert(0, overwrite) + else: + raise ValueError( + "{} provided for choice variable {}, but the " + "choices are {}.".format(overwrite, variable, context_value) + ) + else: + # Simply overwrite the value for this variable + context[variable] = overwrite + + +def generate_context( + context_file='cookiecutter.json', 
default_context=None, extra_context=None +): + """Generate the context for a Cookiecutter project template. + + Loads the JSON file as a Python object, with key being the JSON filename. + + :param context_file: JSON file containing key/value pairs for populating + the cookiecutter's variables. + :param default_context: Dictionary containing config to take into account. + :param extra_context: Dictionary containing configuration overrides + """ + context = OrderedDict([]) + + try: + with open(context_file, encoding='utf-8') as file_handle: + obj = json.load(file_handle, object_pairs_hook=OrderedDict) + except ValueError as e: + # JSON decoding error. Let's throw a new exception that is more + # friendly for the developer or user. + full_fpath = os.path.abspath(context_file) + json_exc_message = str(e) + our_exc_message = ( + 'JSON decoding error while loading "{}". Decoding' + ' error details: "{}"'.format(full_fpath, json_exc_message) + ) + raise ContextDecodingException(our_exc_message) + + # Add the Python object to the context dictionary + file_name = os.path.split(context_file)[1] + file_stem = file_name.split('.')[0] + context[file_stem] = obj + + # Overwrite context variable defaults with the default context from the + # user's global config, if available + if default_context: + try: + apply_overwrites_to_context(obj, default_context) + except ValueError as ex: + warnings.warn("Invalid default received: " + str(ex)) + if extra_context: + apply_overwrites_to_context(obj, extra_context) + + logger.debug('Context generated is %s', context) + return context + + +def generate_file(project_dir, infile, context, env, skip_if_file_exists=False): + """Render filename of infile as name of outfile, handle infile correctly. + + Dealing with infile appropriately: + + a. If infile is a binary file, copy it over without rendering. + b. If infile is a text file, render its contents and write the + rendered infile to outfile. 
+ + Precondition: + + When calling `generate_file()`, the root template dir must be the + current working directory. Using `utils.work_in()` is the recommended + way to perform this directory change. + + :param project_dir: Absolute path to the resulting generated project. + :param infile: Input file to generate the file from. Relative to the root + template dir. + :param context: Dict for populating the cookiecutter's variables. + :param env: Jinja2 template execution environment. + """ + logger.debug('Processing file %s', infile) + + # Render the path to the output file (not including the root project dir) + outfile_tmpl = env.from_string(infile) + + outfile = os.path.join(project_dir, outfile_tmpl.render(**context)) + file_name_is_empty = os.path.isdir(outfile) + if file_name_is_empty: + logger.debug('The resulting file name is empty: %s', outfile) + return + + if skip_if_file_exists and os.path.exists(outfile): + logger.debug('The resulting file already exists: %s', outfile) + return + + logger.debug('Created file at %s', outfile) + + # Just copy over binary files. Don't render. 
+ logger.debug("Check %s to see if it's a binary", infile) + if is_binary(infile): + logger.debug('Copying binary %s to %s without rendering', infile, outfile) + shutil.copyfile(infile, outfile) + else: + # Force fwd slashes on Windows for get_template + # This is a by-design Jinja issue + infile_fwd_slashes = infile.replace(os.path.sep, '/') + + # Render the file + try: + tmpl = env.get_template(infile_fwd_slashes) + except TemplateSyntaxError as exception: + # Disable translated so that printed exception contains verbose + # information about syntax error location + exception.translated = False + raise + rendered_file = tmpl.render(**context) + + # Detect original file newline to output the rendered file + # note: newline='' ensures newlines are not converted + with open(infile, encoding='utf-8', newline='') as rd: + rd.readline() # Read the first line to load 'newlines' value + + # Use `_new_lines` overwrite from context, if configured. + newline = rd.newlines + if context['cookiecutter'].get('_new_lines', False): + newline = context['cookiecutter']['_new_lines'] + logger.debug('Overwriting end line character with %s', newline) + + logger.debug('Writing contents to file %s', outfile) + + with open(outfile, 'w', encoding='utf-8', newline=newline) as fh: + fh.write(rendered_file) + + # Apply file permissions to output file + shutil.copymode(infile, outfile) + + +def render_and_create_dir( + dirname, context, output_dir, environment, overwrite_if_exists=False +): + """Render name of a directory, create the directory, return its path.""" + name_tmpl = environment.from_string(dirname) + rendered_dirname = name_tmpl.render(**context) + + dir_to_create = os.path.normpath(os.path.join(output_dir, rendered_dirname)) + + logger.debug( + 'Rendered dir %s must exist in output_dir %s', dir_to_create, output_dir + ) + + output_dir_exists = os.path.exists(dir_to_create) + + if output_dir_exists: + if overwrite_if_exists: + logger.debug( + 'Output directory %s already exists, 
overwriting it', dir_to_create + ) + else: + msg = f'Error: "{dir_to_create}" directory already exists' + raise OutputDirExistsException(msg) + else: + make_sure_path_exists(dir_to_create) + + return dir_to_create, not output_dir_exists + + +def ensure_dir_is_templated(dirname): + """Ensure that dirname is a templated directory name.""" + if '{{' in dirname and '}}' in dirname: + return True + else: + raise NonTemplatedInputDirException + + +def _run_hook_from_repo_dir( + repo_dir, hook_name, project_dir, context, delete_project_on_failure +): + """Run hook from repo directory, clean project directory if hook fails. + + :param repo_dir: Project template input directory. + :param hook_name: The hook to execute. + :param project_dir: The directory to execute the script from. + :param context: Cookiecutter project context. + :param delete_project_on_failure: Delete the project directory on hook + failure? + """ + with work_in(repo_dir): + try: + run_hook(hook_name, project_dir, context) + except FailedHookException: + if delete_project_on_failure: + rmtree(project_dir) + logger.error( + "Stopping generation because %s hook " + "script didn't exit successfully", + hook_name, + ) + raise + + +def generate_files( + repo_dir, + context=None, + output_dir='.', + overwrite_if_exists=False, + skip_if_file_exists=False, + accept_hooks=True, +): + """Render the templates and saves them to files. + + :param repo_dir: Project template input directory. + :param context: Dict for populating the template's variables. + :param output_dir: Where to output the generated project dir into. + :param overwrite_if_exists: Overwrite the contents of the output directory + if it exists. + :param accept_hooks: Accept pre and post hooks if set to `True`. 
+ """ + template_dir = find_template(repo_dir) + logger.debug('Generating project from %s...', template_dir) + context = context or OrderedDict([]) + + envvars = context.get('cookiecutter', {}).get('_jinja2_env_vars', {}) + + unrendered_dir = os.path.split(template_dir)[1] + ensure_dir_is_templated(unrendered_dir) + env = StrictEnvironment(context=context, keep_trailing_newline=True, **envvars) + try: + project_dir, output_directory_created = render_and_create_dir( + unrendered_dir, context, output_dir, env, overwrite_if_exists + ) + except UndefinedError as err: + msg = f"Unable to create project directory '{unrendered_dir}'" + raise UndefinedVariableInTemplate(msg, err, context) + + # We want the Jinja path and the OS paths to match. Consequently, we'll: + # + CD to the template folder + # + Set Jinja's path to '.' + # + # In order to build our files to the correct folder(s), we'll use an + # absolute path for the target folder (project_dir) + + project_dir = os.path.abspath(project_dir) + logger.debug('Project directory is %s', project_dir) + + # if we created the output directory, then it's ok to remove it + # if rendering fails + delete_project_on_failure = output_directory_created + + if accept_hooks: + _run_hook_from_repo_dir( + repo_dir, 'pre_gen_project', project_dir, context, delete_project_on_failure + ) + + with work_in(template_dir): + env.loader = FileSystemLoader('.') + + for root, dirs, files in os.walk('.'): + # We must separate the two types of dirs into different lists. + # The reason is that we don't want ``os.walk`` to go through the + # unrendered directories, since they will just be copied. 
+ copy_dirs = [] + render_dirs = [] + + for d in dirs: + d_ = os.path.normpath(os.path.join(root, d)) + # We check the full path, because that's how it can be + # specified in the ``_copy_without_render`` setting, but + # we store just the dir name + if is_copy_only_path(d_, context): + copy_dirs.append(d) + else: + render_dirs.append(d) + + for copy_dir in copy_dirs: + indir = os.path.normpath(os.path.join(root, copy_dir)) + outdir = os.path.normpath(os.path.join(project_dir, indir)) + outdir = env.from_string(outdir).render(**context) + logger.debug('Copying dir %s to %s without rendering', indir, outdir) + shutil.copytree(indir, outdir) + + # We mutate ``dirs``, because we only want to go through these dirs + # recursively + dirs[:] = render_dirs + for d in dirs: + unrendered_dir = os.path.join(project_dir, root, d) + try: + render_and_create_dir( + unrendered_dir, context, output_dir, env, overwrite_if_exists + ) + except UndefinedError as err: + if delete_project_on_failure: + rmtree(project_dir) + _dir = os.path.relpath(unrendered_dir, output_dir) + msg = f"Unable to create directory '{_dir}'" + raise UndefinedVariableInTemplate(msg, err, context) + + for f in files: + infile = os.path.normpath(os.path.join(root, f)) + if is_copy_only_path(infile, context): + outfile_tmpl = env.from_string(infile) + outfile_rendered = outfile_tmpl.render(**context) + outfile = os.path.join(project_dir, outfile_rendered) + logger.debug( + 'Copying file %s to %s without rendering', infile, outfile + ) + shutil.copyfile(infile, outfile) + shutil.copymode(infile, outfile) + continue + try: + generate_file( + project_dir, infile, context, env, skip_if_file_exists + ) + except UndefinedError as err: + if delete_project_on_failure: + rmtree(project_dir) + msg = f"Unable to create file '{infile}'" + raise UndefinedVariableInTemplate(msg, err, context) + + if accept_hooks: + _run_hook_from_repo_dir( + repo_dir, + 'post_gen_project', + project_dir, + context, + 
delete_project_on_failure, + ) + + return project_dir diff --git a/third_party/python/cookiecutter/cookiecutter/hooks.py b/third_party/python/cookiecutter/cookiecutter/hooks.py new file mode 100644 index 0000000000000..763287c584ade --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/hooks.py @@ -0,0 +1,131 @@ +"""Functions for discovering and executing various cookiecutter hooks.""" +import errno +import logging +import os +import subprocess # nosec +import sys +import tempfile + +from cookiecutter import utils +from cookiecutter.environment import StrictEnvironment +from cookiecutter.exceptions import FailedHookException + +logger = logging.getLogger(__name__) + +_HOOKS = [ + 'pre_gen_project', + 'post_gen_project', +] +EXIT_SUCCESS = 0 + + +def valid_hook(hook_file, hook_name): + """Determine if a hook file is valid. + + :param hook_file: The hook file to consider for validity + :param hook_name: The hook to find + :return: The hook file validity + """ + filename = os.path.basename(hook_file) + basename = os.path.splitext(filename)[0] + + matching_hook = basename == hook_name + supported_hook = basename in _HOOKS + backup_file = filename.endswith('~') + + return matching_hook and supported_hook and not backup_file + + +def find_hook(hook_name, hooks_dir='hooks'): + """Return a dict of all hook scripts provided. + + Must be called with the project template as the current working directory. + Dict's key will be the hook/script's name, without extension, while values + will be the absolute path to the script. Missing scripts will not be + included in the returned dict. 
+ + :param hook_name: The hook to find + :param hooks_dir: The hook directory in the template + :return: The absolute path to the hook script or None + """ + logger.debug('hooks_dir is %s', os.path.abspath(hooks_dir)) + + if not os.path.isdir(hooks_dir): + logger.debug('No hooks/dir in template_dir') + return None + + scripts = [] + for hook_file in os.listdir(hooks_dir): + if valid_hook(hook_file, hook_name): + scripts.append(os.path.abspath(os.path.join(hooks_dir, hook_file))) + + if len(scripts) == 0: + return None + return scripts + + +def run_script(script_path, cwd='.'): + """Execute a script from a working directory. + + :param script_path: Absolute path to the script to run. + :param cwd: The directory to run the script from. + """ + run_thru_shell = sys.platform.startswith('win') + if script_path.endswith('.py'): + script_command = [sys.executable, script_path] + else: + script_command = [script_path] + + utils.make_executable(script_path) + + try: + proc = subprocess.Popen(script_command, shell=run_thru_shell, cwd=cwd) # nosec + exit_status = proc.wait() + if exit_status != EXIT_SUCCESS: + raise FailedHookException( + f'Hook script failed (exit status: {exit_status})' + ) + except OSError as os_error: + if os_error.errno == errno.ENOEXEC: + raise FailedHookException( + 'Hook script failed, might be an empty file or missing a shebang' + ) + raise FailedHookException(f'Hook script failed (error: {os_error})') + + +def run_script_with_context(script_path, cwd, context): + """Execute a script after rendering it with Jinja. + + :param script_path: Absolute path to the script to run. + :param cwd: The directory to run the script from. + :param context: Cookiecutter project template context. 
+ """ + _, extension = os.path.splitext(script_path) + + with open(script_path, encoding='utf-8') as file: + contents = file.read() + + with tempfile.NamedTemporaryFile(delete=False, mode='wb', suffix=extension) as temp: + env = StrictEnvironment(context=context, keep_trailing_newline=True) + template = env.from_string(contents) + output = template.render(**context) + temp.write(output.encode('utf-8')) + + run_script(temp.name, cwd) + + +def run_hook(hook_name, project_dir, context): + """ + Try to find and execute a hook from the specified project directory. + + :param hook_name: The hook to execute. + :param project_dir: The directory to execute the script from. + :param context: Cookiecutter project context. + """ + scripts = find_hook(hook_name) + if not scripts: + logger.debug('No %s hook found', hook_name) + return + logger.debug('Running hook %s', hook_name) + for script in scripts: + run_script_with_context(script, project_dir, context) diff --git a/third_party/python/cookiecutter/cookiecutter/log.py b/third_party/python/cookiecutter/cookiecutter/log.py new file mode 100644 index 0000000000000..d7633c5715aec --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/log.py @@ -0,0 +1,51 @@ +"""Module for setting up logging.""" +import logging +import sys + +LOG_LEVELS = { + 'DEBUG': logging.DEBUG, + 'INFO': logging.INFO, + 'WARNING': logging.WARNING, + 'ERROR': logging.ERROR, + 'CRITICAL': logging.CRITICAL, +} + +LOG_FORMATS = { + 'DEBUG': '%(levelname)s %(name)s: %(message)s', + 'INFO': '%(levelname)s: %(message)s', +} + + +def configure_logger(stream_level='DEBUG', debug_file=None): + """Configure logging for cookiecutter. + + Set up logging to stdout with given level. If ``debug_file`` is given set + up logging to file with DEBUG level. 
+ """ + # Set up 'cookiecutter' logger + logger = logging.getLogger('cookiecutter') + logger.setLevel(logging.DEBUG) + + # Remove all attached handlers, in case there was + # a logger with using the name 'cookiecutter' + del logger.handlers[:] + + # Create a file handler if a log file is provided + if debug_file is not None: + debug_formatter = logging.Formatter(LOG_FORMATS['DEBUG']) + file_handler = logging.FileHandler(debug_file) + file_handler.setLevel(LOG_LEVELS['DEBUG']) + file_handler.setFormatter(debug_formatter) + logger.addHandler(file_handler) + + # Get settings based on the given stream_level + log_formatter = logging.Formatter(LOG_FORMATS[stream_level]) + log_level = LOG_LEVELS[stream_level] + + # Create a stream handler + stream_handler = logging.StreamHandler(stream=sys.stdout) + stream_handler.setLevel(log_level) + stream_handler.setFormatter(log_formatter) + logger.addHandler(stream_handler) + + return logger diff --git a/third_party/python/cookiecutter/cookiecutter/main.py b/third_party/python/cookiecutter/cookiecutter/main.py new file mode 100644 index 0000000000000..bc2f262dfba27 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/main.py @@ -0,0 +1,140 @@ +""" +Main entry point for the `cookiecutter` command. + +The code in this module is also a good example of how to use Cookiecutter as a +library rather than a script. 
+""" +from copy import copy +import logging +import os +import sys + +from cookiecutter.config import get_user_config +from cookiecutter.exceptions import InvalidModeException +from cookiecutter.generate import generate_context, generate_files +from cookiecutter.prompt import prompt_for_config +from cookiecutter.replay import dump, load +from cookiecutter.repository import determine_repo_dir +from cookiecutter.utils import rmtree + +logger = logging.getLogger(__name__) + + +def cookiecutter( + template, + checkout=None, + no_input=False, + extra_context=None, + replay=None, + overwrite_if_exists=False, + output_dir='.', + config_file=None, + default_config=False, + password=None, + directory=None, + skip_if_file_exists=False, + accept_hooks=True, +): + """ + Run Cookiecutter just as if using it from the command line. + + :param template: A directory containing a project template directory, + or a URL to a git repository. + :param checkout: The branch, tag or commit ID to checkout after clone. + :param no_input: Prompt the user at command line for manual configuration? + :param extra_context: A dictionary of context that overrides default + and user configuration. + :param replay: Do not prompt for input, instead read from saved json. If + ``True`` read from the ``replay_dir``. + if it exists + :param output_dir: Where to output the generated project dir into. + :param config_file: User configuration file path. + :param default_config: Use default values rather than a config file. + :param password: The password to use when extracting the repository. + :param directory: Relative path to a cookiecutter template in a repository. + :param accept_hooks: Accept pre and post hooks if set to `True`. + """ + if replay and ((no_input is not False) or (extra_context is not None)): + err_msg = ( + "You can not use both replay and no_input or extra_context " + "at the same time." 
+ ) + raise InvalidModeException(err_msg) + + config_dict = get_user_config( + config_file=config_file, + default_config=default_config, + ) + + repo_dir, cleanup = determine_repo_dir( + template=template, + abbreviations=config_dict['abbreviations'], + clone_to_dir=config_dict['cookiecutters_dir'], + checkout=checkout, + no_input=no_input, + password=password, + directory=directory, + ) + import_patch = _patch_import_path_for_repo(repo_dir) + + template_name = os.path.basename(os.path.abspath(repo_dir)) + + if replay: + with import_patch: + if isinstance(replay, bool): + context = load(config_dict['replay_dir'], template_name) + else: + path, template_name = os.path.split(os.path.splitext(replay)[0]) + context = load(path, template_name) + else: + context_file = os.path.join(repo_dir, 'cookiecutter.json') + logger.debug('context_file is %s', context_file) + + context = generate_context( + context_file=context_file, + default_context=config_dict['default_context'], + extra_context=extra_context, + ) + + # prompt the user to manually configure at the command line. + # except when 'no-input' flag is set + with import_patch: + context['cookiecutter'] = prompt_for_config(context, no_input) + + # include template dir or url in the context dict + context['cookiecutter']['_template'] = template + + # include output+dir in the context dict + context['cookiecutter']['_output_dir'] = os.path.abspath(output_dir) + + dump(config_dict['replay_dir'], template_name, context) + + # Create project from local context and project template. 
+ with import_patch: + result = generate_files( + repo_dir=repo_dir, + context=context, + overwrite_if_exists=overwrite_if_exists, + skip_if_file_exists=skip_if_file_exists, + output_dir=output_dir, + accept_hooks=accept_hooks, + ) + + # Cleanup (if required) + if cleanup: + rmtree(repo_dir) + + return result + + +class _patch_import_path_for_repo: + def __init__(self, repo_dir): + self._repo_dir = repo_dir + self._path = None + + def __enter__(self): + self._path = copy(sys.path) + sys.path.append(self._repo_dir) + + def __exit__(self, type, value, traceback): + sys.path = self._path diff --git a/third_party/python/cookiecutter/cookiecutter/prompt.py b/third_party/python/cookiecutter/cookiecutter/prompt.py new file mode 100644 index 0000000000000..f06cdc3c0b40f --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/prompt.py @@ -0,0 +1,236 @@ +"""Functions for prompting the user for project info.""" +import functools +import json +from collections import OrderedDict + +import click +from jinja2.exceptions import UndefinedError + +from cookiecutter.environment import StrictEnvironment +from cookiecutter.exceptions import UndefinedVariableInTemplate + + +def read_user_variable(var_name, default_value): + """Prompt user for variable and return the entered value or given default. + + :param str var_name: Variable of the context to query the user + :param default_value: Value that will be returned if no input happens + """ + # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt + return click.prompt(var_name, default=default_value) + + +def read_user_yes_no(question, default_value): + """Prompt the user to reply with 'yes' or 'no' (or equivalent values). 
+ + Note: + Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n' + + :param str question: Question to the user + :param default_value: Value that will be returned if no input happens + """ + # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt + return click.prompt(question, default=default_value, type=click.BOOL) + + +def read_repo_password(question): + """Prompt the user to enter a password. + + :param str question: Question to the user + """ + # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt + return click.prompt(question, hide_input=True) + + +def read_user_choice(var_name, options): + """Prompt the user to choose from several options for the given variable. + + The first item will be returned if no input happens. + + :param str var_name: Variable as specified in the context + :param list options: Sequence of options that are available to select from + :return: Exactly one item of ``options`` that has been chosen by the user + """ + # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt + if not isinstance(options, list): + raise TypeError + + if not options: + raise ValueError + + choice_map = OrderedDict((f'{i}', value) for i, value in enumerate(options, 1)) + choices = choice_map.keys() + default = '1' + + choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()] + prompt = '\n'.join( + ( + f'Select {var_name}:', + '\n'.join(choice_lines), + 'Choose from {}'.format(', '.join(choices)), + ) + ) + + user_choice = click.prompt( + prompt, type=click.Choice(choices), default=default, show_choices=False + ) + return choice_map[user_choice] + + +DEFAULT_DISPLAY = 'default' + + +def process_json(user_value, default_value=None): + """Load user-supplied value as a JSON dict. 
+
+    :param str user_value: User-supplied value to load as a JSON dict
+    """
+    if user_value == DEFAULT_DISPLAY:
+        # Return the given default w/o any processing
+        return default_value
+
+    try:
+        user_dict = json.loads(user_value, object_pairs_hook=OrderedDict)
+    except Exception:
+        # Leave it up to click to ask the user again
+        raise click.UsageError('Unable to decode to JSON.')
+
+    if not isinstance(user_dict, dict):
+        # Leave it up to click to ask the user again
+        raise click.UsageError('Requires JSON dict.')
+
+    return user_dict
+
+
+def read_user_dict(var_name, default_value):
+    """Prompt the user to provide a dictionary of data.
+
+    :param str var_name: Variable as specified in the context
+    :param default_value: Value that will be returned if no input is provided
+    :return: A Python dictionary to use in the context.
+    """
+    # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
+    if not isinstance(default_value, dict):
+        raise TypeError
+
+    user_value = click.prompt(
+        var_name,
+        default=DEFAULT_DISPLAY,
+        type=click.STRING,
+        value_proc=functools.partial(process_json, default_value=default_value),
+    )
+
+    if click.__version__.startswith("7.") and user_value == DEFAULT_DISPLAY:
+        # click 7.x does not invoke value_proc on the default value.
+        return default_value  # pragma: no cover
+    return user_value
+
+
+def render_variable(env, raw, cookiecutter_dict):
+    """Render the next variable to be displayed in the user prompt.
+
+    Inside the prompting taken from the cookiecutter.json file, this renders
+    the next variable. For example, if a project_name is "Peanut Butter
+    Cookie", the repo_name could be rendered with:
+
+    `{{ cookiecutter.project_name.replace(" ", "_") }}`.
+
+    This is then presented to the user as the default.
+
+    :param Environment env: A Jinja2 Environment object.
+    :param raw: The next value to be prompted for by the user.
+    :param dict cookiecutter_dict: The current context as it's gradually
+        being populated with variables.
+ :return: The rendered value for the default variable. + """ + if raw is None: + return None + elif isinstance(raw, dict): + return { + render_variable(env, k, cookiecutter_dict): render_variable( + env, v, cookiecutter_dict + ) + for k, v in raw.items() + } + elif isinstance(raw, list): + return [render_variable(env, v, cookiecutter_dict) for v in raw] + elif not isinstance(raw, str): + raw = str(raw) + + template = env.from_string(raw) + + rendered_template = template.render(cookiecutter=cookiecutter_dict) + return rendered_template + + +def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input): + """Prompt user with a set of options to choose from. + + Each of the possible choices is rendered beforehand. + """ + rendered_options = [render_variable(env, raw, cookiecutter_dict) for raw in options] + + if no_input: + return rendered_options[0] + return read_user_choice(key, rendered_options) + + +def prompt_for_config(context, no_input=False): + """Prompt user to enter a new config. + + :param dict context: Source for field names and sample values. + :param no_input: Prompt the user at command line for manual configuration? + """ + cookiecutter_dict = OrderedDict([]) + env = StrictEnvironment(context=context) + + # First pass: Handle simple and raw variables, plus choices. + # These must be done first because the dictionaries keys and + # values might refer to them. 
+    for key, raw in context['cookiecutter'].items():
+        if key.startswith('_') and not key.startswith('__'):
+            cookiecutter_dict[key] = raw
+            continue
+        elif key.startswith('__'):
+            cookiecutter_dict[key] = render_variable(env, raw, cookiecutter_dict)
+            continue
+
+        try:
+            if isinstance(raw, list):
+                # We are dealing with a choice variable
+                val = prompt_choice_for_config(
+                    cookiecutter_dict, env, key, raw, no_input
+                )
+                cookiecutter_dict[key] = val
+            elif not isinstance(raw, dict):
+                # We are dealing with a regular variable
+                val = render_variable(env, raw, cookiecutter_dict)
+
+                if not no_input:
+                    val = read_user_variable(key, val)
+
+                cookiecutter_dict[key] = val
+        except UndefinedError as err:
+            msg = f"Unable to render variable '{key}'"
+            raise UndefinedVariableInTemplate(msg, err, context)
+
+    # Second pass; handle the dictionaries.
+    for key, raw in context['cookiecutter'].items():
+        # Skip private type dicts not to be rendered.
+        if key.startswith('_') and not key.startswith('__'):
+            continue
+
+        try:
+            if isinstance(raw, dict):
+                # We are dealing with a dict variable
+                val = render_variable(env, raw, cookiecutter_dict)
+
+                if not no_input and not key.startswith('__'):
+                    val = read_user_dict(key, val)
+
+                cookiecutter_dict[key] = val
+        except UndefinedError as err:
+            msg = f"Unable to render variable '{key}'"
+            raise UndefinedVariableInTemplate(msg, err, context)
+
+    return cookiecutter_dict
diff --git a/third_party/python/cookiecutter/cookiecutter/replay.py b/third_party/python/cookiecutter/cookiecutter/replay.py
new file mode 100644
index 0000000000000..9730e84da807e
--- /dev/null
+++ b/third_party/python/cookiecutter/cookiecutter/replay.py
@@ -0,0 +1,52 @@
+"""
+cookiecutter.replay.
+ +------------------- +""" +import json +import os + +from cookiecutter.utils import make_sure_path_exists + + +def get_file_name(replay_dir, template_name): + """Get the name of file.""" + suffix = '.json' if not template_name.endswith('.json') else '' + file_name = f'{template_name}{suffix}' + return os.path.join(replay_dir, file_name) + + +def dump(replay_dir, template_name, context): + """Write json data to file.""" + if not make_sure_path_exists(replay_dir): + raise OSError(f'Unable to create replay dir at {replay_dir}') + + if not isinstance(template_name, str): + raise TypeError('Template name is required to be of type str') + + if not isinstance(context, dict): + raise TypeError('Context is required to be of type dict') + + if 'cookiecutter' not in context: + raise ValueError('Context is required to contain a cookiecutter key') + + replay_file = get_file_name(replay_dir, template_name) + + with open(replay_file, 'w') as outfile: + json.dump(context, outfile, indent=2) + + +def load(replay_dir, template_name): + """Read json data from file.""" + if not isinstance(template_name, str): + raise TypeError('Template name is required to be of type str') + + replay_file = get_file_name(replay_dir, template_name) + + with open(replay_file) as infile: + context = json.load(infile) + + if 'cookiecutter' not in context: + raise ValueError('Context is required to contain a cookiecutter key') + + return context diff --git a/third_party/python/cookiecutter/cookiecutter/repository.py b/third_party/python/cookiecutter/cookiecutter/repository.py new file mode 100644 index 0000000000000..f8e6fcbcc5ea7 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/repository.py @@ -0,0 +1,130 @@ +"""Cookiecutter repository functions.""" +import os +import re + +from cookiecutter.exceptions import RepositoryNotFound +from cookiecutter.vcs import clone +from cookiecutter.zipfile import unzip + +REPO_REGEX = re.compile( + r""" +# something like git:// ssh:// file:// etc. 
+((((git|hg)\+)?(git|ssh|file|https?):(//)?) + | # or + (\w+@[\w\.]+) # something like user@... +) +""", + re.VERBOSE, +) + + +def is_repo_url(value): + """Return True if value is a repository URL.""" + return bool(REPO_REGEX.match(value)) + + +def is_zip_file(value): + """Return True if value is a zip file.""" + return value.lower().endswith('.zip') + + +def expand_abbreviations(template, abbreviations): + """Expand abbreviations in a template name. + + :param template: The project template name. + :param abbreviations: Abbreviation definitions. + """ + if template in abbreviations: + return abbreviations[template] + + # Split on colon. If there is no colon, rest will be empty + # and prefix will be the whole template + prefix, sep, rest = template.partition(':') + if prefix in abbreviations: + return abbreviations[prefix].format(rest) + + return template + + +def repository_has_cookiecutter_json(repo_directory): + """Determine if `repo_directory` contains a `cookiecutter.json` file. + + :param repo_directory: The candidate repository directory. + :return: True if the `repo_directory` is valid, else False. + """ + repo_directory_exists = os.path.isdir(repo_directory) + + repo_config_exists = os.path.isfile( + os.path.join(repo_directory, 'cookiecutter.json') + ) + return repo_directory_exists and repo_config_exists + + +def determine_repo_dir( + template, + abbreviations, + clone_to_dir, + checkout, + no_input, + password=None, + directory=None, +): + """ + Locate the repository directory from a template reference. + + Applies repository abbreviations to the template reference. + If the template refers to a repository URL, clone it. + If the template is a path to a local repository, use it. + + :param template: A directory containing a project template directory, + or a URL to a git repository. + :param abbreviations: A dictionary of repository abbreviation + definitions. + :param clone_to_dir: The directory to clone the repository into. 
+    :param checkout: The branch, tag or commit ID to checkout after clone.
+    :param no_input: Prompt the user at command line for manual configuration?
+    :param password: The password to use when extracting the repository.
+    :param directory: Directory within repo where cookiecutter.json lives.
+    :return: A tuple containing the cookiecutter template directory, and
+        a boolean describing whether that directory should be cleaned up
+        after the template has been instantiated.
+    :raises: `RepositoryNotFound` if a repository directory could not be found.
+    """
+    template = expand_abbreviations(template, abbreviations)
+
+    if is_zip_file(template):
+        unzipped_dir = unzip(
+            zip_uri=template,
+            is_url=is_repo_url(template),
+            clone_to_dir=clone_to_dir,
+            no_input=no_input,
+            password=password,
+        )
+        repository_candidates = [unzipped_dir]
+        cleanup = True
+    elif is_repo_url(template):
+        cloned_repo = clone(
+            repo_url=template,
+            checkout=checkout,
+            clone_to_dir=clone_to_dir,
+            no_input=no_input,
+        )
+        repository_candidates = [cloned_repo]
+        cleanup = False
+    else:
+        repository_candidates = [template, os.path.join(clone_to_dir, template)]
+        cleanup = False
+
+    if directory:
+        repository_candidates = [
+            os.path.join(s, directory) for s in repository_candidates
+        ]
+
+    for repo_candidate in repository_candidates:
+        if repository_has_cookiecutter_json(repo_candidate):
+            return repo_candidate, cleanup
+
+    raise RepositoryNotFound(
+        'A valid repository for "{}" could not be found in the following '
+        'locations:\n{}'.format(template, '\n'.join(repository_candidates))
+    )
diff --git a/third_party/python/cookiecutter/cookiecutter/utils.py b/third_party/python/cookiecutter/cookiecutter/utils.py
new file mode 100644
index 0000000000000..4750a2663e705
--- /dev/null
+++ b/third_party/python/cookiecutter/cookiecutter/utils.py
@@ -0,0 +1,120 @@
+"""Helper functions used throughout Cookiecutter."""
+import contextlib
+import errno
+import logging
+import os
+import shutil
+import stat
+import sys + +from cookiecutter.prompt import read_user_yes_no +from jinja2.ext import Extension + +logger = logging.getLogger(__name__) + + +def force_delete(func, path, exc_info): + """Error handler for `shutil.rmtree()` equivalent to `rm -rf`. + + Usage: `shutil.rmtree(path, onerror=force_delete)` + From https://docs.python.org/3/library/shutil.html#rmtree-example + """ + os.chmod(path, stat.S_IWRITE) + func(path) + + +def rmtree(path): + """Remove a directory and all its contents. Like rm -rf on Unix. + + :param path: A directory path. + """ + shutil.rmtree(path, onerror=force_delete) + + +def make_sure_path_exists(path): + """Ensure that a directory exists. + + :param path: A directory path. + """ + logger.debug('Making sure path exists: %s', path) + try: + os.makedirs(path) + logger.debug('Created directory at: %s', path) + except OSError as exception: + if exception.errno != errno.EEXIST: + return False + return True + + +@contextlib.contextmanager +def work_in(dirname=None): + """Context manager version of os.chdir. + + When exited, returns to the working directory prior to entering. + """ + curdir = os.getcwd() + try: + if dirname is not None: + os.chdir(dirname) + yield + finally: + os.chdir(curdir) + + +def make_executable(script_path): + """Make `script_path` executable. + + :param script_path: The file to change + """ + status = os.stat(script_path) + os.chmod(script_path, status.st_mode | stat.S_IEXEC) + + +def prompt_and_delete(path, no_input=False): + """ + Ask user if it's okay to delete the previously-downloaded file/directory. + + If yes, delete it. If no, checks to see if the old version should be + reused. If yes, it's reused; otherwise, Cookiecutter exits. + + :param path: Previously downloaded zipfile. + :param no_input: Suppress prompt to delete repo and just delete it. 
+ :return: True if the content was deleted + """ + # Suppress prompt if called via API + if no_input: + ok_to_delete = True + else: + question = ( + "You've downloaded {} before. Is it okay to delete and re-download it?" + ).format(path) + + ok_to_delete = read_user_yes_no(question, 'yes') + + if ok_to_delete: + if os.path.isdir(path): + rmtree(path) + else: + os.remove(path) + return True + else: + ok_to_reuse = read_user_yes_no( + "Do you want to re-use the existing version?", 'yes' + ) + + if ok_to_reuse: + return False + + sys.exit() + + +def simple_filter(filter_function): + """Decorate a function to wrap it in a simplified jinja2 extension.""" + + class SimpleFilterExtension(Extension): + def __init__(self, environment): + super().__init__(environment) + environment.filters[filter_function.__name__] = filter_function + + SimpleFilterExtension.__name__ = filter_function.__name__ + return SimpleFilterExtension diff --git a/third_party/python/cookiecutter/cookiecutter/vcs.py b/third_party/python/cookiecutter/cookiecutter/vcs.py new file mode 100644 index 0000000000000..bb4356b317455 --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/vcs.py @@ -0,0 +1,125 @@ +"""Helper functions for working with version control systems.""" +import logging +import os +import subprocess # nosec +from shutil import which + +from cookiecutter.exceptions import ( + RepositoryCloneFailed, + RepositoryNotFound, + UnknownRepoType, + VCSNotInstalled, +) +from cookiecutter.utils import make_sure_path_exists, prompt_and_delete + +logger = logging.getLogger(__name__) + + +BRANCH_ERRORS = [ + 'error: pathspec', + 'unknown revision', +] + + +def identify_repo(repo_url): + """Determine if `repo_url` should be treated as a URL to a git or hg repo. + + Repos can be identified by prepending "hg+" or "git+" to the repo URL. + + :param repo_url: Repo URL of unknown type. + :returns: ('git', repo_url), ('hg', repo_url), or None. 
+ """ + repo_url_values = repo_url.split('+') + if len(repo_url_values) == 2: + repo_type = repo_url_values[0] + if repo_type in ["git", "hg"]: + return repo_type, repo_url_values[1] + else: + raise UnknownRepoType + else: + if 'git' in repo_url: + return 'git', repo_url + elif 'bitbucket' in repo_url: + return 'hg', repo_url + else: + raise UnknownRepoType + + +def is_vcs_installed(repo_type): + """ + Check if the version control system for a repo type is installed. + + :param repo_type: + """ + return bool(which(repo_type)) + + +def clone(repo_url, checkout=None, clone_to_dir='.', no_input=False): + """Clone a repo to the current directory. + + :param repo_url: Repo URL of unknown type. + :param checkout: The branch, tag or commit ID to checkout after clone. + :param clone_to_dir: The directory to clone to. + Defaults to the current directory. + :param no_input: Suppress all user prompts when calling via API. + :returns: str with path to the new directory of the repository. + """ + # Ensure that clone_to_dir exists + clone_to_dir = os.path.expanduser(clone_to_dir) + make_sure_path_exists(clone_to_dir) + + # identify the repo_type + repo_type, repo_url = identify_repo(repo_url) + + # check that the appropriate VCS for the repo_type is installed + if not is_vcs_installed(repo_type): + msg = f"'{repo_type}' is not installed." 
+ raise VCSNotInstalled(msg) + + repo_url = repo_url.rstrip('/') + repo_name = os.path.split(repo_url)[1] + if repo_type == 'git': + repo_name = repo_name.split(':')[-1].rsplit('.git')[0] + repo_dir = os.path.normpath(os.path.join(clone_to_dir, repo_name)) + if repo_type == 'hg': + repo_dir = os.path.normpath(os.path.join(clone_to_dir, repo_name)) + logger.debug(f'repo_dir is {repo_dir}') + + if os.path.isdir(repo_dir): + clone = prompt_and_delete(repo_dir, no_input=no_input) + else: + clone = True + + if clone: + try: + subprocess.check_output( # nosec + [repo_type, 'clone', repo_url], + cwd=clone_to_dir, + stderr=subprocess.STDOUT, + ) + if checkout is not None: + checkout_params = [checkout] + # Avoid Mercurial "--config" and "--debugger" injection vulnerability + if repo_type == "hg": + checkout_params.insert(0, "--") + subprocess.check_output( # nosec + [repo_type, 'checkout', *checkout_params], + cwd=repo_dir, + stderr=subprocess.STDOUT, + ) + except subprocess.CalledProcessError as clone_error: + output = clone_error.output.decode('utf-8') + if 'not found' in output.lower(): + raise RepositoryNotFound( + f'The repository {repo_url} could not be found, ' + 'have you made a typo?' + ) + if any(error in output for error in BRANCH_ERRORS): + raise RepositoryCloneFailed( + f'The {checkout} branch of repository ' + f'{repo_url} could not found, have you made a typo?' 
+ ) + logger.error('git clone failed with error: %s', output) + raise + + return repo_dir diff --git a/third_party/python/cookiecutter/cookiecutter/zipfile.py b/third_party/python/cookiecutter/cookiecutter/zipfile.py new file mode 100644 index 0000000000000..7395ce61bcc7e --- /dev/null +++ b/third_party/python/cookiecutter/cookiecutter/zipfile.py @@ -0,0 +1,112 @@ +"""Utility functions for handling and fetching repo archives in zip format.""" +import os +import tempfile +from zipfile import BadZipFile, ZipFile + +import requests + +from cookiecutter.exceptions import InvalidZipRepository +from cookiecutter.prompt import read_repo_password +from cookiecutter.utils import make_sure_path_exists, prompt_and_delete + + +def unzip(zip_uri, is_url, clone_to_dir='.', no_input=False, password=None): + """Download and unpack a zipfile at a given URI. + + This will download the zipfile to the cookiecutter repository, + and unpack into a temporary directory. + + :param zip_uri: The URI for the zipfile. + :param is_url: Is the zip URI a URL or a file? + :param clone_to_dir: The cookiecutter repository directory + to put the archive into. + :param no_input: Suppress any prompts + :param password: The password to use when unpacking the repository. + """ + # Ensure that clone_to_dir exists + clone_to_dir = os.path.expanduser(clone_to_dir) + make_sure_path_exists(clone_to_dir) + + if is_url: + # Build the name of the cached zipfile, + # and prompt to delete if it already exists. + identifier = zip_uri.rsplit('/', 1)[1] + zip_path = os.path.join(clone_to_dir, identifier) + + if os.path.exists(zip_path): + download = prompt_and_delete(zip_path, no_input=no_input) + else: + download = True + + if download: + # (Re) download the zipfile + r = requests.get(zip_uri, stream=True) + with open(zip_path, 'wb') as f: + for chunk in r.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + else: + # Just use the local zipfile as-is. 
+ zip_path = os.path.abspath(zip_uri) + + # Now unpack the repository. The zipfile will be unpacked + # into a temporary directory + try: + zip_file = ZipFile(zip_path) + + if len(zip_file.namelist()) == 0: + raise InvalidZipRepository(f'Zip repository {zip_uri} is empty') + + # The first record in the zipfile should be the directory entry for + # the archive. If it isn't a directory, there's a problem. + first_filename = zip_file.namelist()[0] + if not first_filename.endswith('/'): + raise InvalidZipRepository( + 'Zip repository {} does not include ' + 'a top-level directory'.format(zip_uri) + ) + + # Construct the final target directory + project_name = first_filename[:-1] + unzip_base = tempfile.mkdtemp() + unzip_path = os.path.join(unzip_base, project_name) + + # Extract the zip file into the temporary directory + try: + zip_file.extractall(path=unzip_base) + except RuntimeError: + # File is password protected; try to get a password from the + # environment; if that doesn't work, ask the user. 
+ if password is not None: + try: + zip_file.extractall(path=unzip_base, pwd=password.encode('utf-8')) + except RuntimeError: + raise InvalidZipRepository( + 'Invalid password provided for protected repository' + ) + elif no_input: + raise InvalidZipRepository( + 'Unable to unlock password protected repository' + ) + else: + retry = 0 + while retry is not None: + try: + password = read_repo_password('Repo password') + zip_file.extractall( + path=unzip_base, pwd=password.encode('utf-8') + ) + retry = None + except RuntimeError: + retry += 1 + if retry == 3: + raise InvalidZipRepository( + 'Invalid password provided for protected repository' + ) + + except BadZipFile: + raise InvalidZipRepository( + f'Zip repository {zip_uri} is not a valid zip archive:' + ) + + return unzip_path diff --git a/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/DESCRIPTION.rst b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000000000..a56fcb8b1423b --- /dev/null +++ b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/DESCRIPTION.rst @@ -0,0 +1,147 @@ +=========== +Jinja2 Time +=========== + +|pypi| |pyversions| |license| |travis-ci| + +Jinja2 Extension for Dates and Times + +.. |pypi| image:: https://img.shields.io/pypi/v/jinja2-time.svg + :target: https://pypi.python.org/pypi/jinja2-time + :alt: PyPI Package + +.. |pyversions| image:: https://img.shields.io/pypi/pyversions/jinja2-time.svg + :target: https://pypi.python.org/pypi/jinja2-time/ + :alt: PyPI Python Versions + +.. |license| image:: https://img.shields.io/pypi/l/jinja2-time.svg + :target: https://pypi.python.org/pypi/jinja2-time + :alt: PyPI Package License + +.. 
|travis-ci| image:: https://travis-ci.org/hackebrot/jinja2-time.svg?branch=master + :target: https://travis-ci.org/hackebrot/jinja2-time + :alt: See Build Status on Travis CI + +Installation +------------ + +**jinja2-time** is available for download from `PyPI`_ via `pip`_:: + + $ pip install jinja2-time + +It will automatically install `jinja2`_ along with `arrow`_. + +.. _`jinja2`: https://github.com/mitsuhiko/jinja2 +.. _`PyPI`: https://pypi.python.org/pypi +.. _`arrow`: https://github.com/crsmithdev/arrow +.. _`pip`: https://pypi.python.org/pypi/pip/ + +Usage +----- + +Now Tag +~~~~~~~ + +The extension comes with a ``now`` tag that provides convenient access to the +`arrow.now()`_ API from your templates. + +You can control the output by specifying a format, that will be passed to +Python's `strftime()`_: + +.. _`arrow.now()`: http://crsmithdev.com/arrow/#arrow.factory.ArrowFactory.now +.. _`strftime()`: https://docs.python.org/3.5/library/datetime.html#strftime-and-strptime-behavior + +.. code-block:: python + + from jinja2 import Environment + + env = Environment(extensions=['jinja2_time.TimeExtension']) + + # Timezone 'local', default format -> "2015-12-10" + template = env.from_string("{% now 'local' %}") + + # Timezone 'utc', explicit format -> "Thu, 10 Dec 2015 15:49:01" + template = env.from_string("{% now 'utc', '%a, %d %b %Y %H:%M:%S' %}") + + # Timezone 'Europe/Berlin', explicit format -> "CET +0100" + template = env.from_string("{% now 'Europe/Berlin', '%Z %z' %}") + + # Timezone 'utc', explicit format -> "2015" + template = env.from_string("{% now 'utc', '%Y' %}") + + template.render() + +Default Datetime Format +~~~~~~~~~~~~~~~~~~~~~~~ + +**TimeExtension** extends the environment with a ``datetime_format`` attribute. + +It is used as a fallback if you omit the format for ``now``. + +.. 
code-block:: python + + from jinja2 import Environment + + env = Environment(extensions=['jinja2_time.TimeExtension']) + + env.datetime_format = '%a, %d %b %Y %H:%M:%S' + + # Timezone 'utc', default format -> "Thu, 10 Dec 2015 15:49:01" + template = env.from_string("{% now 'utc' %}") + + template.render() + +Time Offset +~~~~~~~~~~~ + +**jinja2-time** implements a convenient interface to modify ``now`` by a +relative time offset: + +.. code-block:: python + + # Examples for now "2015-12-09 23:33:01" + + # "Thu, 10 Dec 2015 01:33:31" + "{% now 'utc' + 'hours=2, seconds=30' %}" + + # "Wed, 09 Dec 2015 23:22:01" + "{% now 'utc' - 'minutes=11' %}" + + # "07 Dec 2015 23:00:00" + "{% now 'utc' - 'days=2, minutes=33, seconds=1', '%d %b %Y %H:%M:%S' %}" + +Further documentation on the underlying functionality can be found in the +`arrow replace docs`_. + +.. _`arrow replace docs`: http://arrow.readthedocs.io/en/latest/#replace-shift + + +Issues +------ + +If you encounter any problems, please `file an issue`_ along with a detailed description. + +.. _`file an issue`: https://github.com/hackebrot/jinja2-time/issues + + +Code of Conduct +--------------- + +Everyone interacting in the jinja2-time project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. + +.. _`PyPA Code of Conduct`: https://www.pypa.io/en/latest/code-of-conduct/ + +License +------- + +Distributed under the terms of the `MIT`_ license, jinja2-time is free and open source software + +.. image:: https://opensource.org/trademarks/osi-certified/web/osi-certified-120x100.png + :align: left + :alt: OSI certified + :target: https://opensource.org/ + +.. 
_`MIT`: http://opensource.org/licenses/MIT + + diff --git a/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/METADATA b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/METADATA new file mode 100644 index 0000000000000..3714572a788ec --- /dev/null +++ b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/METADATA @@ -0,0 +1,174 @@ +Metadata-Version: 2.0 +Name: jinja2-time +Version: 0.2.0 +Summary: Jinja2 Extension for Dates and Times +Home-page: https://github.com/hackebrot/jinja2-time +Author: Raphael Pierzina +Author-email: raphael@hackebrot.de +License: MIT +Keywords: jinja2,extension,time +Platform: UNKNOWN +Classifier: Development Status :: 3 - Alpha +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python +Requires-Dist: jinja2 +Requires-Dist: arrow + +=========== +Jinja2 Time +=========== + +|pypi| |pyversions| |license| |travis-ci| + +Jinja2 Extension for Dates and Times + +.. |pypi| image:: https://img.shields.io/pypi/v/jinja2-time.svg + :target: https://pypi.python.org/pypi/jinja2-time + :alt: PyPI Package + +.. |pyversions| image:: https://img.shields.io/pypi/pyversions/jinja2-time.svg + :target: https://pypi.python.org/pypi/jinja2-time/ + :alt: PyPI Python Versions + +.. |license| image:: https://img.shields.io/pypi/l/jinja2-time.svg + :target: https://pypi.python.org/pypi/jinja2-time + :alt: PyPI Package License + +.. 
|travis-ci| image:: https://travis-ci.org/hackebrot/jinja2-time.svg?branch=master + :target: https://travis-ci.org/hackebrot/jinja2-time + :alt: See Build Status on Travis CI + +Installation +------------ + +**jinja2-time** is available for download from `PyPI`_ via `pip`_:: + + $ pip install jinja2-time + +It will automatically install `jinja2`_ along with `arrow`_. + +.. _`jinja2`: https://github.com/mitsuhiko/jinja2 +.. _`PyPI`: https://pypi.python.org/pypi +.. _`arrow`: https://github.com/crsmithdev/arrow +.. _`pip`: https://pypi.python.org/pypi/pip/ + +Usage +----- + +Now Tag +~~~~~~~ + +The extension comes with a ``now`` tag that provides convenient access to the +`arrow.now()`_ API from your templates. + +You can control the output by specifying a format, that will be passed to +Python's `strftime()`_: + +.. _`arrow.now()`: http://crsmithdev.com/arrow/#arrow.factory.ArrowFactory.now +.. _`strftime()`: https://docs.python.org/3.5/library/datetime.html#strftime-and-strptime-behavior + +.. code-block:: python + + from jinja2 import Environment + + env = Environment(extensions=['jinja2_time.TimeExtension']) + + # Timezone 'local', default format -> "2015-12-10" + template = env.from_string("{% now 'local' %}") + + # Timezone 'utc', explicit format -> "Thu, 10 Dec 2015 15:49:01" + template = env.from_string("{% now 'utc', '%a, %d %b %Y %H:%M:%S' %}") + + # Timezone 'Europe/Berlin', explicit format -> "CET +0100" + template = env.from_string("{% now 'Europe/Berlin', '%Z %z' %}") + + # Timezone 'utc', explicit format -> "2015" + template = env.from_string("{% now 'utc', '%Y' %}") + + template.render() + +Default Datetime Format +~~~~~~~~~~~~~~~~~~~~~~~ + +**TimeExtension** extends the environment with a ``datetime_format`` attribute. + +It is used as a fallback if you omit the format for ``now``. + +.. 
code-block:: python + + from jinja2 import Environment + + env = Environment(extensions=['jinja2_time.TimeExtension']) + + env.datetime_format = '%a, %d %b %Y %H:%M:%S' + + # Timezone 'utc', default format -> "Thu, 10 Dec 2015 15:49:01" + template = env.from_string("{% now 'utc' %}") + + template.render() + +Time Offset +~~~~~~~~~~~ + +**jinja2-time** implements a convenient interface to modify ``now`` by a +relative time offset: + +.. code-block:: python + + # Examples for now "2015-12-09 23:33:01" + + # "Thu, 10 Dec 2015 01:33:31" + "{% now 'utc' + 'hours=2, seconds=30' %}" + + # "Wed, 09 Dec 2015 23:22:01" + "{% now 'utc' - 'minutes=11' %}" + + # "07 Dec 2015 23:00:00" + "{% now 'utc' - 'days=2, minutes=33, seconds=1', '%d %b %Y %H:%M:%S' %}" + +Further documentation on the underlying functionality can be found in the +`arrow replace docs`_. + +.. _`arrow replace docs`: http://arrow.readthedocs.io/en/latest/#replace-shift + + +Issues +------ + +If you encounter any problems, please `file an issue`_ along with a detailed description. + +.. _`file an issue`: https://github.com/hackebrot/jinja2-time/issues + + +Code of Conduct +--------------- + +Everyone interacting in the jinja2-time project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. + +.. _`PyPA Code of Conduct`: https://www.pypa.io/en/latest/code-of-conduct/ + +License +------- + +Distributed under the terms of the `MIT`_ license, jinja2-time is free and open source software + +.. image:: https://opensource.org/trademarks/osi-certified/web/osi-certified-120x100.png + :align: left + :alt: OSI certified + :target: https://opensource.org/ + +.. 
_`MIT`: http://opensource.org/licenses/MIT + + diff --git a/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/RECORD b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/RECORD new file mode 100644 index 0000000000000..e647ee37262ba --- /dev/null +++ b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/RECORD @@ -0,0 +1,8 @@ +jinja2_time/__init__.py,sha256=M48p6Z1Rsv5OOL-q7kTRog1JPSquiRYOOxkhO3jJcrE,184 +jinja2_time/jinja2_time.py,sha256=MbBuzEMYSU_fHdx9M7ZfGGvEwdAnDOstQviU6GNPUpE,2080 +jinja2_time-0.2.0.dist-info/DESCRIPTION.rst,sha256=ATjmUYtQxCdXoMkTixBnyyF9lU1AhshfMJxY5ZrJkqo,4109 +jinja2_time-0.2.0.dist-info/METADATA,sha256=9nhHvQYAYZcafV5VigNdV_FB1btn5xLSY-kAOvt7bUk,5135 +jinja2_time-0.2.0.dist-info/metadata.json,sha256=i6xJP3_vGVQGUno_3vOjK9gFRtyFuyTnMy2k76rf1Js,1140 +jinja2_time-0.2.0.dist-info/RECORD,, +jinja2_time-0.2.0.dist-info/top_level.txt,sha256=oQxkTcv8SKoVYEKSYSrhkn5AsQR1-p1rAqo-Ouu0mNA,12 +jinja2_time-0.2.0.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 diff --git a/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/WHEEL b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000..9dff69d86102d --- /dev/null +++ b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.24.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/metadata.json b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/metadata.json new file mode 100644 index 0000000000000..d1de0ed870652 --- /dev/null +++ b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/metadata.json @@ -0,0 +1 @@ +{"extras": [], "run_requires": [{"requires": ["jinja2", "arrow"]}], "version": "0.2.0", "keywords": ["jinja2", "extension", "time"], "license": "MIT", "summary": "Jinja2 Extension for Dates and Times", "generator": 
"bdist_wheel (0.24.0)", "metadata_version": "2.0", "name": "jinja2-time", "extensions": {"python.details": {"contacts": [{"email": "raphael@hackebrot.de", "name": "Raphael Pierzina", "role": "author"}], "project_urls": {"Home": "https://github.com/hackebrot/jinja2-time"}, "document_names": {"description": "DESCRIPTION.rst"}}}, "classifiers": ["Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python"]} \ No newline at end of file diff --git a/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/top_level.txt b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/top_level.txt new file mode 100644 index 0000000000000..5ac870f1f95c8 --- /dev/null +++ b/third_party/python/jinja2_time/jinja2_time-0.2.0.dist-info/top_level.txt @@ -0,0 +1 @@ +jinja2_time diff --git a/third_party/python/jinja2_time/jinja2_time/__init__.py b/third_party/python/jinja2_time/jinja2_time/__init__.py new file mode 100644 index 0000000000000..d813a7b741d15 --- /dev/null +++ b/third_party/python/jinja2_time/jinja2_time/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- + +from .jinja2_time import TimeExtension + +__author__ = 'Raphael Pierzina' +__email__ = 'raphael@hackebrot.de' +__version__ = '0.2.0' + + +__all__ = ['TimeExtension'] diff --git a/third_party/python/jinja2_time/jinja2_time/jinja2_time.py b/third_party/python/jinja2_time/jinja2_time/jinja2_time.py new file mode 100644 index 0000000000000..ce713cbdc2a6f --- /dev/null +++ 
b/third_party/python/jinja2_time/jinja2_time/jinja2_time.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +import arrow + +from jinja2 import nodes +from jinja2.ext import Extension + + +class TimeExtension(Extension): + tags = set(['now']) + + def __init__(self, environment): + super(TimeExtension, self).__init__(environment) + + # add the defaults to the environment + environment.extend(datetime_format='%Y-%m-%d') + + def _datetime(self, timezone, operator, offset, datetime_format): + d = arrow.now(timezone) + + # Parse replace kwargs from offset and include operator + replace_params = {} + for param in offset.split(','): + interval, value = param.split('=') + replace_params[interval.strip()] = float(operator + value.strip()) + d = d.replace(**replace_params) + + if datetime_format is None: + datetime_format = self.environment.datetime_format + return d.strftime(datetime_format) + + def _now(self, timezone, datetime_format): + if datetime_format is None: + datetime_format = self.environment.datetime_format + return arrow.now(timezone).strftime(datetime_format) + + def parse(self, parser): + lineno = next(parser.stream).lineno + + node = parser.parse_expression() + + if parser.stream.skip_if('comma'): + datetime_format = parser.parse_expression() + else: + datetime_format = nodes.Const(None) + + if isinstance(node, nodes.Add): + call_method = self.call_method( + '_datetime', + [node.left, nodes.Const('+'), node.right, datetime_format], + lineno=lineno, + ) + elif isinstance(node, nodes.Sub): + call_method = self.call_method( + '_datetime', + [node.left, nodes.Const('-'), node.right, datetime_format], + lineno=lineno, + ) + else: + call_method = self.call_method( + '_now', + [node, datetime_format], + lineno=lineno, + ) + return nodes.Output([call_method], lineno=lineno) diff --git a/third_party/python/poetry.lock b/third_party/python/poetry.lock index f70ed08682fc5..fad0bbbddcf36 100644 --- a/third_party/python/poetry.lock +++ b/third_party/python/poetry.lock @@ 
-82,6 +82,22 @@ files = [ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, ] +[[package]] +name = "arrow" +version = "1.2.3" +description = "Better dates & times for Python" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "arrow-1.2.3-py3-none-any.whl", hash = "sha256:5a49ab92e3b7b71d96cd6bfcc4df14efefc9dfa96ea19045815914a6ab6b1fe2"}, + {file = "arrow-1.2.3.tar.gz", hash = "sha256:3934b30ca1b9f292376d9db15b19446088d12ec58629bc3f0da28fd55fb633a1"}, +] + +[package.dependencies] +python-dateutil = ">=2.7.0" +typing-extensions = {version = "*", markers = "python_version < \"3.8\""} + [[package]] name = "async-timeout" version = "3.0.1" @@ -116,6 +132,21 @@ docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib- tests = ["attrs[tests-no-zope]", "zope-interface"] tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +[[package]] +name = "binaryornot" +version = "0.4.4" +description = "Ultra-lightweight pure Python package to check if a file is binary or text." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "binaryornot-0.4.4-py2.py3-none-any.whl", hash = "sha256:b8b71173c917bddcd2c16070412e369c3ed7f0528926f70cac18a6c97fd563e4"}, + {file = "binaryornot-0.4.4.tar.gz", hash = "sha256:359501dfc9d40632edc9fac890e19542db1a287bbcfa58175b66658392018061"}, +] + +[package.dependencies] +chardet = ">=3.0.2" + [[package]] name = "blessed" version = "1.19.1" @@ -213,6 +244,27 @@ files = [ six = "*" toml = "*" +[[package]] +name = "cookiecutter" +version = "2.1.1" +description = "A command-line utility that creates projects from project templates, e.g. creating a Python package project from a Python package project template." 
+category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cookiecutter-2.1.1-py2.py3-none-any.whl", hash = "sha256:9f3ab027cec4f70916e28f03470bdb41e637a3ad354b4d65c765d93aad160022"}, + {file = "cookiecutter-2.1.1.tar.gz", hash = "sha256:f3982be8d9c53dac1261864013fdec7f83afd2e42ede6f6dd069c5e149c540d5"}, +] + +[package.dependencies] +binaryornot = ">=0.4.4" +click = ">=7.0,<9.0.0" +Jinja2 = ">=2.7,<4.0.0" +jinja2-time = ">=0.2.0" +python-slugify = ">=4.0.0" +pyyaml = ">=5.3.1" +requests = ">=2.23.0" + [[package]] name = "cookies" version = "2.2.1" @@ -428,6 +480,22 @@ MarkupSafe = ">=0.23" [package.extras] i18n = ["Babel (>=0.8)"] +[[package]] +name = "jinja2-time" +version = "0.2.0" +description = "Jinja2 Extension for Dates and Times" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "jinja2-time-0.2.0.tar.gz", hash = "sha256:d14eaa4d315e7688daa4969f616f226614350c48730bfa1692d2caebd8c90d40"}, + {file = "jinja2_time-0.2.0-py2.py3-none-any.whl", hash = "sha256:d3eab6605e3ec8b7a0863df09cc1d23714908fa61aa6986a845c20ba488b4efa"}, +] + +[package.dependencies] +arrow = "*" +jinja2 = "*" + [[package]] name = "jinxed" version = "1.2.0" @@ -821,6 +889,21 @@ files = [ [package.dependencies] six = "*" +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + [[package]] name = "python-hglib" version = "2.4" @@ -832,6 +915,24 @@ files = [ {file = "python-hglib-2.4.tar.gz", hash = 
"sha256:693d6ed92a6566e78802c7a03c256cda33d08c63ad3f00fcfa11379b184b9462"}, ] +[[package]] +name = "python-slugify" +version = "8.0.1" +description = "A Python slugify application that also handles Unicode" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "python-slugify-8.0.1.tar.gz", hash = "sha256:ce0d46ddb668b3be82f4ed5e503dbc33dd815d83e2eb6824211310d3fb172a27"}, + {file = "python_slugify-8.0.1-py2.py3-none-any.whl", hash = "sha256:70ca6ea68fe63ecc8fa4fcf00ae651fc8a5d02d93dcd12ae6d4fc7ca46c4d395"}, +] + +[package.dependencies] +text-unidecode = ">=1.3" + +[package.extras] +unidecode = ["Unidecode (>=1.1.1)"] + [[package]] name = "pyyaml" version = "6.0.1" @@ -1063,19 +1164,19 @@ test = ["aiofiles", "coverage", "flake8", "httmock", "httptest", "hypothesis", " [[package]] name = "taskcluster-taskgraph" -version = "3.5.2" +version = "5.6.1" description = "Build taskcluster taskgraphs" category = "main" optional = false python-versions = "*" files = [ - {file = "taskcluster-taskgraph-3.5.2.tar.gz", hash = "sha256:62f1a320d6b310f65151904a9992719a9b2c4c41ef8f57be810899fd3c5d2703"}, - {file = "taskcluster_taskgraph-3.5.2-py3-none-any.whl", hash = "sha256:6a024ba2383f56e11b764500f92837afb825612a49d24bde9791dfa7aa7ddaec"}, + {file = "taskcluster-taskgraph-5.6.1.tar.gz", hash = "sha256:15c5455eefe2b91155e694452199f9cc4753dff5a74a95cf602e334f9621a8e6"}, + {file = "taskcluster_taskgraph-5.6.1-py3-none-any.whl", hash = "sha256:db14109f1edcbe03c96d2de81eb84e2fed5d218d86a5d026947eeb1a6cfe5a28"}, ] [package.dependencies] appdirs = ">=1.4" -attrs = ">=19.1.0" +cookiecutter = ">=2.1,<3.0" json-e = ">=2.7" mozilla-repo-urls = "*" PyYAML = ">=5.4" @@ -1102,6 +1203,18 @@ files = [ {file = "taskcluster_urls-13.0.1-py3-none-any.whl", hash = "sha256:f66dcbd6572a6216ab65949f0fa0b91f2df647918028436c384e6af5cd12ae2b"}, ] +[[package]] +name = "text-unidecode" +version = "1.3" +description = "The most basic Text::Unidecode port" +category = "main" 
+optional = false +python-versions = "*" +files = [ + {file = "text-unidecode-1.3.tar.gz", hash = "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"}, + {file = "text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8"}, +] + [[package]] name = "toml" version = "0.10.2" @@ -1290,4 +1403,4 @@ testing = ["func-timeout", "jaraco.itertools", "pytest (>=4.6)", "pytest-black ( [metadata] lock-version = "2.0" python-versions = "^3.7" -content-hash = "cd20a7d65e99e5dda7af4b292cf4e0fab6c47e6ea7efec9b1b7f9ded536f6252" +content-hash = "92763d9f4ec346b3ec7f15869b900855270de3af5b99cd44a45cfa2835a4ed78" diff --git a/third_party/python/python_dateutil/dateutil/__init__.py b/third_party/python/python_dateutil/dateutil/__init__.py new file mode 100644 index 0000000000000..0defb82e21f21 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +try: + from ._version import version as __version__ +except ImportError: + __version__ = 'unknown' + +__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', + 'utils', 'zoneinfo'] diff --git a/third_party/python/python_dateutil/dateutil/_common.py b/third_party/python/python_dateutil/dateutil/_common.py new file mode 100644 index 0000000000000..4eb2659bd2986 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/_common.py @@ -0,0 +1,43 @@ +""" +Common code used in multiple modules. 
+""" + + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __hash__(self): + return hash(( + self.weekday, + self.n, + )) + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +# vim:ts=4:sw=4:et diff --git a/third_party/python/python_dateutil/dateutil/_version.py b/third_party/python/python_dateutil/dateutil/_version.py new file mode 100644 index 0000000000000..b723056a756af --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/_version.py @@ -0,0 +1,5 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '2.8.2' +version_tuple = (2, 8, 2) diff --git a/third_party/python/python_dateutil/dateutil/easter.py b/third_party/python/python_dateutil/dateutil/easter.py new file mode 100644 index 0000000000000..f74d1f7442473 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/easter.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic Easter computing method for any given year, using +Western, Orthodox or Julian algorithms. 
+""" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. + + This algorithm implements three different Easter + calculation methods: + + 1. Original calculation in Julian calendar, valid in + dates after 326 AD + 2. Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3. Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + * ``EASTER_JULIAN = 1`` + * ``EASTER_ORTHODOX = 2`` + * ``EASTER_WESTERN = 3`` + + The default method is method 3. + + More about the algorithm may be found at: + + `GM Arts: Easter Algorithms `_ + + and + + `The Calendar FAQ: Easter `_ + + """ + + if not (1 <= method <= 3): + raise ValueError("invalid method") + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g + 15) % 30 + j = (y + y//4 + i) % 7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e + y//100 - 16 - (y//100 - 16)//4 + else: + # New method + c = y//100 + h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 + i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) + j = (y + y//4 + i + 2 - c + c//4) % 7 + + # p can be 
from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i - j + e + d = 1 + (p + 27 + (p + 6)//40) % 31 + m = 3 + (p + 26)//30 + return datetime.date(int(y), int(m), int(d)) diff --git a/third_party/python/python_dateutil/dateutil/parser/__init__.py b/third_party/python/python_dateutil/dateutil/parser/__init__.py new file mode 100644 index 0000000000000..d174b0e4dcc47 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/parser/__init__.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +from ._parser import parse, parser, parserinfo, ParserError +from ._parser import DEFAULTPARSER, DEFAULTTZPARSER +from ._parser import UnknownTimezoneWarning + +from ._parser import __doc__ + +from .isoparser import isoparser, isoparse + +__all__ = ['parse', 'parser', 'parserinfo', + 'isoparse', 'isoparser', + 'ParserError', + 'UnknownTimezoneWarning'] + + +### +# Deprecate portions of the private interface so that downstream code that +# is improperly relying on it is given *some* notice. 
def __deprecated_private_func(f):
    """Return *f* wrapped so that every call emits a DeprecationWarning."""
    from functools import wraps
    import warnings

    # Runtime warning text — kept identical to the historical message.
    message = ('{name} is a private function and may break without warning, '
               'it will be moved and or renamed in future versions.')
    message = message.format(name=f.__name__)

    @wraps(f)
    def deprecated_func(*args, **kwargs):
        warnings.warn(message, DeprecationWarning)
        return f(*args, **kwargs)

    return deprecated_func

def __deprecate_private_class(c):
    """Return a subclass of *c* whose constructor emits a DeprecationWarning."""
    import warnings

    # Runtime warning text — kept identical to the historical message.
    message = ('{name} is a private class and may break without warning, '
               'it will be moved and or renamed in future versions.')
    message = message.format(name=c.__name__)

    class private_class(c):
        __doc__ = c.__doc__

        def __init__(self, *args, **kwargs):
            warnings.warn(message, DeprecationWarning)
            super(private_class, self).__init__(*args, **kwargs)

    # Masquerade as the wrapped class so reprs/tracebacks stay familiar.
    private_class.__name__ = c.__name__

    return private_class


from ._parser import _timelex, _resultbase
from ._parser import _tzparser, _parsetz

# Re-export the private names behind deprecation shims so downstream code
# that (improperly) relies on them gets *some* notice before they move.
_timelex = __deprecate_private_class(_timelex)
_tzparser = __deprecate_private_class(_tzparser)
_resultbase = __deprecate_private_class(_resultbase)
_parsetz = __deprecated_private_func(_parsetz)
If an element +of a date/time stamp is omitted, the following rules are applied: + +- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour + on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is + specified. +- If a time zone is omitted, a timezone-naive datetime is returned. + +If any other elements are missing, they are taken from the +:class:`datetime.datetime` object passed to the parameter ``default``. If this +results in a day number exceeding the valid number of days per month, the +value falls back to the end of the month. + +Additional resources about date/time string formats can be found below: + +- `A summary of the international standard date and time notation + `_ +- `W3C Date and Time Formats `_ +- `Time Formats (Planetary Rings Node) `_ +- `CPAN ParseDate module + `_ +- `Java SimpleDateFormat Class + `_ +""" +from __future__ import unicode_literals + +import datetime +import re +import string +import time +import warnings + +from calendar import monthrange +from io import StringIO + +import six +from six import integer_types, text_type + +from decimal import Decimal + +from warnings import warn + +from .. import relativedelta +from .. import tz + +__all__ = ["parse", "parserinfo", "ParserError"] + + +# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth +# making public and/or figuring out if there is something we can +# take off their plate. 
class _timelex(object):
    """Lexer that splits a date/time string into homogeneous tokens.

    Tokens are runs of letters, runs of digits (possibly containing a
    decimal point or comma), or single separator/whitespace characters.
    Iterate over an instance, or call :meth:`split` for a one-shot list.
    """

    # Fractional seconds are sometimes split by a comma
    _split_decimal = re.compile("([.,])")

    def __init__(self, instream):
        # Accepts bytes, text, or any object exposing read(); bytes are
        # decoded and text is wrapped in StringIO so reads are uniform.
        if isinstance(instream, (bytes, bytearray)):
            instream = instream.decode()

        if isinstance(instream, text_type):
            instream = StringIO(instream)
        elif getattr(instream, 'read', None) is None:
            raise TypeError('Parser must be a string or character stream, not '
                            '{itype}'.format(itype=instream.__class__.__name__))

        self.instream = instream
        self.charstack = []   # characters read past a token boundary, pushed back
        self.tokenstack = []  # tokens already split out, waiting to be emitted
        self.eof = False

    def get_token(self):
        """
        This function breaks the time string into lexical units (tokens), which
        can be parsed by the parser. Lexical units are demarcated by changes in
        the character set, so any continuous string of letters is considered
        one unit, any continuous string of numbers is considered one unit.

        The main complication arises from the fact that dots ('.') can be used
        both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
        "4:30:21.447"). As such, it is necessary to read the full context of
        any dot-separated strings before breaking it into tokens; as such, this
        function maintains a "token stack", for when the ambiguous context
        demands that multiple tokens be parsed at once.
        """
        # Drain any tokens produced by a previous ambiguous-context split.
        if self.tokenstack:
            return self.tokenstack.pop(0)

        seenletters = False
        token = None
        # state: None = no token started, 'a' = word, '0' = number,
        # 'a.' / '0.' = word/number containing dot separators.
        state = None

        while not self.eof:
            # We only realize that we've reached the end of a token when we
            # find a character that's not part of the current token - since
            # that character may be part of the next token, it's stored in the
            # charstack.
            if self.charstack:
                nextchar = self.charstack.pop(0)
            else:
                nextchar = self.instream.read(1)
                while nextchar == '\x00':
                    nextchar = self.instream.read(1)

            if not nextchar:
                self.eof = True
                break
            elif not state:
                # First character of the token - determines if we're starting
                # to parse a word, a number or something else.
                token = nextchar
                if self.isword(nextchar):
                    state = 'a'
                elif self.isnum(nextchar):
                    state = '0'
                elif self.isspace(nextchar):
                    token = ' '
                    break  # emit token
                else:
                    break  # emit token
            elif state == 'a':
                # If we've already started reading a word, we keep reading
                # letters until we find something that's not part of a word.
                seenletters = True
                if self.isword(nextchar):
                    token += nextchar
                elif nextchar == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0':
                # If we've already started reading a number, we keep reading
                # numbers until we find something that doesn't fit.
                if self.isnum(nextchar):
                    token += nextchar
                elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == 'a.':
                # If we've seen some letters and a dot separator, continue
                # parsing, and the tokens will be broken up later.
                seenletters = True
                if nextchar == '.' or self.isword(nextchar):
                    token += nextchar
                elif self.isnum(nextchar) and token[-1] == '.':
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0.':
                # If we've seen at least one dot separator, keep going, we'll
                # break up the tokens later.
                if nextchar == '.' or self.isnum(nextchar):
                    token += nextchar
                elif self.isword(nextchar) and token[-1] == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token

        # A dotted token that cannot be a decimal number (contains letters,
        # more than one dot, or ends in a separator) is really several
        # tokens: split it and queue the remainder on the tokenstack.
        if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
                                       token[-1] in '.,')):
            l = self._split_decimal.split(token)
            token = l[0]
            for tok in l[1:]:
                if tok:
                    self.tokenstack.append(tok)

        # Normalize a comma decimal mark ("4,30") to a dot for the parser.
        if state == '0.' and token.count('.') == 0:
            token = token.replace(',', '.')

        return token

    def __iter__(self):
        return self

    def __next__(self):
        # Iterator protocol: exhausting the stream raises StopIteration.
        token = self.get_token()
        if token is None:
            raise StopIteration

        return token

    def next(self):
        return self.__next__()  # Python 2.x support

    @classmethod
    def split(cls, s):
        # Convenience: tokenize an entire string in one call.
        return list(cls(s))

    @classmethod
    def isword(cls, nextchar):
        """ Whether or not the next character is part of a word """
        return nextchar.isalpha()

    @classmethod
    def isnum(cls, nextchar):
        """ Whether the next character is part of a number """
        return nextchar.isdigit()

    @classmethod
    def isspace(cls, nextchar):
        """ Whether the next character is whitespace """
        return nextchar.isspace()
+ """ + + # m from a.m/p.m, t from ISO T separator + JUMP = [" ", ".", ",", ";", "-", "/", "'", + "at", "on", "and", "ad", "m", "t", "of", + "st", "nd", "rd", "th"] + + WEEKDAYS = [("Mon", "Monday"), + ("Tue", "Tuesday"), # TODO: "Tues" + ("Wed", "Wednesday"), + ("Thu", "Thursday"), # TODO: "Thurs" + ("Fri", "Friday"), + ("Sat", "Saturday"), + ("Sun", "Sunday")] + MONTHS = [("Jan", "January"), + ("Feb", "February"), # TODO: "Febr" + ("Mar", "March"), + ("Apr", "April"), + ("May", "May"), + ("Jun", "June"), + ("Jul", "July"), + ("Aug", "August"), + ("Sep", "Sept", "September"), + ("Oct", "October"), + ("Nov", "November"), + ("Dec", "December")] + HMS = [("h", "hour", "hours"), + ("m", "minute", "minutes"), + ("s", "second", "seconds")] + AMPM = [("am", "a"), + ("pm", "p")] + UTCZONE = ["UTC", "GMT", "Z", "z"] + PERTAIN = ["of"] + TZOFFSET = {} + # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate", + # "Anno Domini", "Year of Our Lord"] + + def __init__(self, dayfirst=False, yearfirst=False): + self._jump = self._convert(self.JUMP) + self._weekdays = self._convert(self.WEEKDAYS) + self._months = self._convert(self.MONTHS) + self._hms = self._convert(self.HMS) + self._ampm = self._convert(self.AMPM) + self._utczone = self._convert(self.UTCZONE) + self._pertain = self._convert(self.PERTAIN) + + self.dayfirst = dayfirst + self.yearfirst = yearfirst + + self._year = time.localtime().tm_year + self._century = self._year // 100 * 100 + + def _convert(self, lst): + dct = {} + for i, v in enumerate(lst): + if isinstance(v, tuple): + for v in v: + dct[v.lower()] = i + else: + dct[v.lower()] = i + return dct + + def jump(self, name): + return name.lower() in self._jump + + def weekday(self, name): + try: + return self._weekdays[name.lower()] + except KeyError: + pass + return None + + def month(self, name): + try: + return self._months[name.lower()] + 1 + except KeyError: + pass + return None + + def hms(self, name): + try: + return self._hms[name.lower()] + except KeyError: + 
class _ymd(list):
    """List of up to three year/month/day candidate values, in input order.

    Tracks which positions have been positively identified as year
    (``ystridx``), month (``mstridx``) or day (``dstridx``) so that
    :meth:`resolve_ymd` can disambiguate the remaining members.
    """

    def __init__(self, *args, **kwargs):
        # Use the explicit class (not self.__class__) in super(): the
        # original super(self.__class__, self) recurses infinitely if
        # this class is ever subclassed.
        super(_ymd, self).__init__(*args, **kwargs)
        self.century_specified = False
        self.dstridx = None
        self.mstridx = None
        self.ystridx = None

    @property
    def has_year(self):
        return self.ystridx is not None

    @property
    def has_month(self):
        return self.mstridx is not None

    @property
    def has_day(self):
        return self.dstridx is not None

    def could_be_day(self, value):
        """Whether *value* could still be the day given what is known."""
        if self.has_day:
            return False
        elif not self.has_month:
            return 1 <= value <= 31
        elif not self.has_year:
            # Be permissive, assume leap year
            month = self[self.mstridx]
            return 1 <= value <= monthrange(2000, month)[1]
        else:
            month = self[self.mstridx]
            year = self[self.ystridx]
            return 1 <= value <= monthrange(year, month)[1]

    def append(self, val, label=None):
        """Append *val* (str or int), optionally labeled 'Y', 'M' or 'D'.

        Values longer than two digits (or greater than 100) are forced to
        be the year and mark the century as explicitly specified.

        :raises ValueError: If the label conflicts with the value or a
            position for that label was already recorded.
        """
        if hasattr(val, '__len__'):
            if val.isdigit() and len(val) > 2:
                self.century_specified = True
                if label not in [None, 'Y']:  # pragma: no cover
                    raise ValueError(label)
                label = 'Y'
        elif val > 100:
            self.century_specified = True
            if label not in [None, 'Y']:  # pragma: no cover
                raise ValueError(label)
            label = 'Y'

        # Explicit class in super() — see note in __init__.
        super(_ymd, self).append(int(val))

        if label == 'M':
            if self.has_month:
                raise ValueError('Month is already set')
            self.mstridx = len(self) - 1
        elif label == 'D':
            if self.has_day:
                raise ValueError('Day is already set')
            self.dstridx = len(self) - 1
        elif label == 'Y':
            if self.has_year:
                raise ValueError('Year is already set')
            self.ystridx = len(self) - 1

    def _resolve_from_stridxs(self, strids):
        """
        Try to resolve the identities of year/month/day elements using
        ystridx, mstridx, and dstridx, if enough of these are specified.
        """
        if len(self) == 3 and len(strids) == 2:
            # we can back out the remaining stridx value
            missing = [x for x in range(3) if x not in strids.values()]
            key = [x for x in ['y', 'm', 'd'] if x not in strids]
            assert len(missing) == len(key) == 1
            key = key[0]
            val = missing[0]
            strids[key] = val

        assert len(self) == len(strids)  # otherwise this should not be called
        out = {key: self[strids[key]] for key in strids}
        return (out.get('y'), out.get('m'), out.get('d'))

    def resolve_ymd(self, yearfirst, dayfirst):
        """Return ``(year, month, day)`` (members may be None).

        Uses any recorded labels first, then falls back to positional
        heuristics controlled by *yearfirst* and *dayfirst*.

        :raises ValueError: If more than three values were collected.
        """
        len_ymd = len(self)
        year, month, day = (None, None, None)

        strids = (('y', self.ystridx),
                  ('m', self.mstridx),
                  ('d', self.dstridx))

        strids = {key: val for key, val in strids if val is not None}
        if (len(self) == len(strids) > 0 or
                (len(self) == 3 and len(strids) == 2)):
            return self._resolve_from_stridxs(strids)

        mstridx = self.mstridx

        if len_ymd > 3:
            raise ValueError("More than three YMD values")
        elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
            # One member, or two members with a month string
            if mstridx is not None:
                month = self[mstridx]
                # since mstridx is 0 or 1, self[mstridx-1] always
                # looks up the other element
                other = self[mstridx - 1]
            else:
                other = self[0]

            if len_ymd > 1 or mstridx is None:
                if other > 31:
                    year = other
                else:
                    day = other

        elif len_ymd == 2:
            # Two members with numbers
            if self[0] > 31:
                # 99-01
                year, month = self
            elif self[1] > 31:
                # 01-99
                month, year = self
            elif dayfirst and self[1] <= 12:
                # 13-01
                day, month = self
            else:
                # 01-13
                month, day = self

        elif len_ymd == 3:
            # Three members
            if mstridx == 0:
                if self[1] > 31:
                    # Apr-2003-25
                    month, year, day = self
                else:
                    month, day, year = self
            elif mstridx == 1:
                if self[0] > 31 or (yearfirst and self[2] <= 31):
                    # 99-Jan-01
                    year, month, day = self
                else:
                    # 01-Jan-01
                    # Give precedence to day-first, since
                    # two-digit years is usually hand-written.
                    day, month, year = self

            elif mstridx == 2:
                # WTF!?
                if self[1] > 31:
                    # 01-99-Jan
                    day, year, month = self
                else:
                    # 99-01-Jan
                    year, day, month = self

            else:
                if (self[0] > 31 or
                        self.ystridx == 0 or
                        (yearfirst and self[1] <= 12 and self[2] <= 31)):
                    # 99-01-01
                    if dayfirst and self[2] <= 12:
                        year, day, month = self
                    else:
                        year, month, day = self
                elif self[0] > 12 or (dayfirst and self[1] <= 12):
                    # 13-01-01
                    day, month, year = self
                else:
                    # 01-13-01
                    month, day, year = self

        return year, month, day
+ + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a + naive :class:`datetime.datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in seconds or a :class:`tzinfo` object. + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param \\*\\*kwargs: + Keyword arguments as passed to ``_parse()``. + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ParserError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises TypeError: + Raised for non-string or character stream input. 
+ + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. + """ + + if default is None: + default = datetime.datetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) + + res, skipped_tokens = self._parse(timestr, **kwargs) + + if res is None: + raise ParserError("Unknown string format: %s", timestr) + + if len(res) == 0: + raise ParserError("String does not contain a date: %s", timestr) + + try: + ret = self._build_naive(res, default) + except ValueError as e: + six.raise_from(ParserError(str(e) + ": %s", timestr), e) + + if not ignoretz: + ret = self._build_tzaware(ret, res, tzinfos) + + if kwargs.get('fuzzy_with_tokens', False): + return ret, skipped_tokens + else: + return ret + + class _result(_resultbase): + __slots__ = ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond", + "tzname", "tzoffset", "ampm","any_unused_tokens"] + + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, + fuzzy_with_tokens=False): + """ + Private method which performs the heavy lifting of parsing, called from + ``parse()``, which passes on its ``kwargs`` to this function. + + :param timestr: + The string to parse. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. If set to ``None``, this value is retrieved from the + current :class:`parserinfo` object (which itself defaults to + ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + If this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). 
+ + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + """ + if fuzzy_with_tokens: + fuzzy = True + + info = self.info + + if dayfirst is None: + dayfirst = info.dayfirst + + if yearfirst is None: + yearfirst = info.yearfirst + + res = self._result() + l = _timelex.split(timestr) # Splits the timestr into tokens + + skipped_idxs = [] + + # year/month/day list + ymd = _ymd() + + len_l = len(l) + i = 0 + try: + while i < len_l: + + # Check if it's a number + value_repr = l[i] + try: + value = float(value_repr) + except ValueError: + value = None + + if value is not None: + # Numeric token + i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy) + + # Check weekday + elif info.weekday(l[i]) is not None: + value = info.weekday(l[i]) + res.weekday = value + + # Check month name + elif info.month(l[i]) is not None: + value = info.month(l[i]) + ymd.append(value, 'M') + + if i + 1 < len_l: + if l[i + 1] in ('-', '/'): + # Jan-01[-99] + sep = l[i + 1] + ymd.append(l[i + 2]) + + if i + 3 < len_l and l[i + 3] == sep: + # Jan-01-99 + ymd.append(l[i + 4]) + i += 2 + + i += 2 + + elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and + info.pertain(l[i + 2])): + # Jan of 01 + # In this case, 01 is clearly year + if l[i + 4].isdigit(): + # Convert it here to become unambiguous + value = int(l[i + 4]) + year = str(info.convertyear(value)) + ymd.append(year, 'Y') + else: + # Wrong guess + pass + # TODO: not hit in 
tests + i += 4 + + # Check am/pm + elif info.ampm(l[i]) is not None: + value = info.ampm(l[i]) + val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy) + + if val_is_ampm: + res.hour = self._adjust_ampm(res.hour, value) + res.ampm = value + + elif fuzzy: + skipped_idxs.append(i) + + # Check for a timezone name + elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]): + res.tzname = l[i] + res.tzoffset = info.tzoffset(res.tzname) + + # Check for something like GMT+3, or BRST+3. Notice + # that it doesn't mean "I am 3 hours after GMT", but + # "my time +3 is GMT". If found, we reverse the + # logic so that timezone parsing code will get it + # right. + if i + 1 < len_l and l[i + 1] in ('+', '-'): + l[i + 1] = ('+', '-')[l[i + 1] == '+'] + res.tzoffset = None + if info.utczone(res.tzname): + # With something like GMT+3, the timezone + # is *not* GMT. + res.tzname = None + + # Check for a numbered timezone + elif res.hour is not None and l[i] in ('+', '-'): + signal = (-1, 1)[l[i] == '+'] + len_li = len(l[i + 1]) + + # TODO: check that l[i + 1] is integer? + if len_li == 4: + # -0300 + hour_offset = int(l[i + 1][:2]) + min_offset = int(l[i + 1][2:]) + elif i + 2 < len_l and l[i + 2] == ':': + # -03:00 + hour_offset = int(l[i + 1]) + min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like? 
+ i += 2 + elif len_li <= 2: + # -[0]3 + hour_offset = int(l[i + 1][:2]) + min_offset = 0 + else: + raise ValueError(timestr) + + res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60) + + # Look for a timezone name between parenthesis + if (i + 5 < len_l and + info.jump(l[i + 2]) and l[i + 3] == '(' and + l[i + 5] == ')' and + 3 <= len(l[i + 4]) and + self._could_be_tzname(res.hour, res.tzname, + None, l[i + 4])): + # -0300 (BRST) + res.tzname = l[i + 4] + i += 4 + + i += 1 + + # Check jumps + elif not (info.jump(l[i]) or fuzzy): + raise ValueError(timestr) + + else: + skipped_idxs.append(i) + i += 1 + + # Process year/month/day + year, month, day = ymd.resolve_ymd(yearfirst, dayfirst) + + res.century_specified = ymd.century_specified + res.year = year + res.month = month + res.day = day + + except (IndexError, ValueError): + return None, None + + if not info.validate(res): + return None, None + + if fuzzy_with_tokens: + skipped_tokens = self._recombine_skipped(l, skipped_idxs) + return res, tuple(skipped_tokens) + else: + return res, None + + def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy): + # Token is a number + value_repr = tokens[idx] + try: + value = self._to_decimal(value_repr) + except Exception as e: + six.raise_from(ValueError('Unknown numeric token'), e) + + len_li = len(value_repr) + + len_l = len(tokens) + + if (len(ymd) == 3 and len_li in (2, 4) and + res.hour is None and + (idx + 1 >= len_l or + (tokens[idx + 1] != ':' and + info.hms(tokens[idx + 1]) is None))): + # 19990101T23[59] + s = tokens[idx] + res.hour = int(s[:2]) + + if len_li == 4: + res.minute = int(s[2:]) + + elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6): + # YYMMDD or HHMMSS[.ss] + s = tokens[idx] + + if not ymd and '.' not in tokens[idx]: + ymd.append(s[:2]) + ymd.append(s[2:4]) + ymd.append(s[4:]) + else: + # 19990101T235959[.59] + + # TODO: Check if res attributes already set. 
+ res.hour = int(s[:2]) + res.minute = int(s[2:4]) + res.second, res.microsecond = self._parsems(s[4:]) + + elif len_li in (8, 12, 14): + # YYYYMMDD + s = tokens[idx] + ymd.append(s[:4], 'Y') + ymd.append(s[4:6]) + ymd.append(s[6:8]) + + if len_li > 8: + res.hour = int(s[8:10]) + res.minute = int(s[10:12]) + + if len_li > 12: + res.second = int(s[12:]) + + elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None: + # HH[ ]h or MM[ ]m or SS[.ss][ ]s + hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True) + (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx) + if hms is not None: + # TODO: checking that hour/minute/second are not + # already set? + self._assign_hms(res, value_repr, hms) + + elif idx + 2 < len_l and tokens[idx + 1] == ':': + # HH:MM[:SS[.ss]] + res.hour = int(value) + value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this? + (res.minute, res.second) = self._parse_min_sec(value) + + if idx + 4 < len_l and tokens[idx + 3] == ':': + res.second, res.microsecond = self._parsems(tokens[idx + 4]) + + idx += 2 + + idx += 2 + + elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'): + sep = tokens[idx + 1] + ymd.append(value_repr) + + if idx + 2 < len_l and not info.jump(tokens[idx + 2]): + if tokens[idx + 2].isdigit(): + # 01-01[-01] + ymd.append(tokens[idx + 2]) + else: + # 01-Jan[-01] + value = info.month(tokens[idx + 2]) + + if value is not None: + ymd.append(value, 'M') + else: + raise ValueError() + + if idx + 3 < len_l and tokens[idx + 3] == sep: + # We have three members + value = info.month(tokens[idx + 4]) + + if value is not None: + ymd.append(value, 'M') + else: + ymd.append(tokens[idx + 4]) + idx += 2 + + idx += 1 + idx += 1 + + elif idx + 1 >= len_l or info.jump(tokens[idx + 1]): + if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None: + # 12 am + hour = int(value) + res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2])) + idx += 1 + else: + # Year, month or day + 
ymd.append(value) + idx += 1 + + elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24): + # 12am + hour = int(value) + res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1])) + idx += 1 + + elif ymd.could_be_day(value): + ymd.append(value) + + elif not fuzzy: + raise ValueError() + + return idx + + def _find_hms_idx(self, idx, tokens, info, allow_jump): + len_l = len(tokens) + + if idx+1 < len_l and info.hms(tokens[idx+1]) is not None: + # There is an "h", "m", or "s" label following this token. We take + # assign the upcoming label to the current token. + # e.g. the "12" in 12h" + hms_idx = idx + 1 + + elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and + info.hms(tokens[idx+2]) is not None): + # There is a space and then an "h", "m", or "s" label. + # e.g. the "12" in "12 h" + hms_idx = idx + 2 + + elif idx > 0 and info.hms(tokens[idx-1]) is not None: + # There is a "h", "m", or "s" preceding this token. Since neither + # of the previous cases was hit, there is no label following this + # token, so we use the previous label. + # e.g. the "04" in "12h04" + hms_idx = idx-1 + + elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and + info.hms(tokens[idx-2]) is not None): + # If we are looking at the final token, we allow for a + # backward-looking check to skip over a space. + # TODO: Are we sure this is the right condition here? 
+ hms_idx = idx - 2 + + else: + hms_idx = None + + return hms_idx + + def _assign_hms(self, res, value_repr, hms): + # See GH issue #427, fixing float rounding + value = self._to_decimal(value_repr) + + if hms == 0: + # Hour + res.hour = int(value) + if value % 1: + res.minute = int(60*(value % 1)) + + elif hms == 1: + (res.minute, res.second) = self._parse_min_sec(value) + + elif hms == 2: + (res.second, res.microsecond) = self._parsems(value_repr) + + def _could_be_tzname(self, hour, tzname, tzoffset, token): + return (hour is not None and + tzname is None and + tzoffset is None and + len(token) <= 5 and + (all(x in string.ascii_uppercase for x in token) + or token in self.info.UTCZONE)) + + def _ampm_valid(self, hour, ampm, fuzzy): + """ + For fuzzy parsing, 'a' or 'am' (both valid English words) + may erroneously trigger the AM/PM flag. Deal with that + here. + """ + val_is_ampm = True + + # If there's already an AM/PM flag, this one isn't one. + if fuzzy and ampm is not None: + val_is_ampm = False + + # If AM/PM is found and hour is not, raise a ValueError + if hour is None: + if fuzzy: + val_is_ampm = False + else: + raise ValueError('No hour specified with AM or PM flag.') + elif not 0 <= hour <= 12: + # If AM/PM is found, it's a 12 hour clock, so raise + # an error for invalid range + if fuzzy: + val_is_ampm = False + else: + raise ValueError('Invalid hour specified for 12-hour clock.') + + return val_is_ampm + + def _adjust_ampm(self, hour, ampm): + if hour < 12 and ampm == 1: + hour += 12 + elif hour == 12 and ampm == 0: + hour = 0 + return hour + + def _parse_min_sec(self, value): + # TODO: Every usage of this function sets res.second to the return + # value. Are there any cases where second will be returned as None and + # we *don't* want to set res.second = None? 
+ minute = int(value) + second = None + + sec_remainder = value % 1 + if sec_remainder: + second = int(60 * sec_remainder) + return (minute, second) + + def _parse_hms(self, idx, tokens, info, hms_idx): + # TODO: Is this going to admit a lot of false-positives for when we + # just happen to have digits and "h", "m" or "s" characters in non-date + # text? I guess hex hashes won't have that problem, but there's plenty + # of random junk out there. + if hms_idx is None: + hms = None + new_idx = idx + elif hms_idx > idx: + hms = info.hms(tokens[hms_idx]) + new_idx = hms_idx + else: + # Looking backwards, increment one. + hms = info.hms(tokens[hms_idx]) + 1 + new_idx = idx + + return (new_idx, hms) + + # ------------------------------------------------------------------ + # Handling for individual tokens. These are kept as methods instead + # of functions for the sake of customizability via subclassing. + + def _parsems(self, value): + """Parse a I[.F] seconds value into (seconds, microseconds).""" + if "." not in value: + return int(value), 0 + else: + i, f = value.split(".") + return int(i), int(f.ljust(6, "0")[:6]) + + def _to_decimal(self, val): + try: + decimal_value = Decimal(val) + # See GH 662, edge case, infinite value should not be converted + # via `_to_decimal` + if not decimal_value.is_finite(): + raise ValueError("Converted decimal value is infinite or NaN") + except Exception as e: + msg = "Could not convert %s to decimal" % val + six.raise_from(ValueError(msg), e) + else: + return decimal_value + + # ------------------------------------------------------------------ + # Post-Parsing construction of datetime output. These are kept as + # methods instead of functions for the sake of customizability via + # subclassing. 
+ + def _build_tzinfo(self, tzinfos, tzname, tzoffset): + if callable(tzinfos): + tzdata = tzinfos(tzname, tzoffset) + else: + tzdata = tzinfos.get(tzname) + # handle case where tzinfo is paased an options that returns None + # eg tzinfos = {'BRST' : None} + if isinstance(tzdata, datetime.tzinfo) or tzdata is None: + tzinfo = tzdata + elif isinstance(tzdata, text_type): + tzinfo = tz.tzstr(tzdata) + elif isinstance(tzdata, integer_types): + tzinfo = tz.tzoffset(tzname, tzdata) + else: + raise TypeError("Offset must be tzinfo subclass, tz string, " + "or int offset.") + return tzinfo + + def _build_tzaware(self, naive, res, tzinfos): + if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)): + tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset) + aware = naive.replace(tzinfo=tzinfo) + aware = self._assign_tzname(aware, res.tzname) + + elif res.tzname and res.tzname in time.tzname: + aware = naive.replace(tzinfo=tz.tzlocal()) + + # Handle ambiguous local datetime + aware = self._assign_tzname(aware, res.tzname) + + # This is mostly relevant for winter GMT zones parsed in the UK + if (aware.tzname() != res.tzname and + res.tzname in self.info.UTCZONE): + aware = aware.replace(tzinfo=tz.UTC) + + elif res.tzoffset == 0: + aware = naive.replace(tzinfo=tz.UTC) + + elif res.tzoffset: + aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) + + elif not res.tzname and not res.tzoffset: + # i.e. no timezone information was found. + aware = naive + + elif res.tzname: + # tz-like string was parsed but we don't know what to do + # with it + warnings.warn("tzname {tzname} identified but not understood. " + "Pass `tzinfos` argument in order to correctly " + "return a timezone-aware datetime. 
In a future " + "version, this will raise an " + "exception.".format(tzname=res.tzname), + category=UnknownTimezoneWarning) + aware = naive + + return aware + + def _build_naive(self, res, default): + repl = {} + for attr in ("year", "month", "day", "hour", + "minute", "second", "microsecond"): + value = getattr(res, attr) + if value is not None: + repl[attr] = value + + if 'day' not in repl: + # If the default day exceeds the last day of the month, fall back + # to the end of the month. + cyear = default.year if res.year is None else res.year + cmonth = default.month if res.month is None else res.month + cday = default.day if res.day is None else res.day + + if cday > monthrange(cyear, cmonth)[1]: + repl['day'] = monthrange(cyear, cmonth)[1] + + naive = default.replace(**repl) + + if res.weekday is not None and not res.day: + naive = naive + relativedelta.relativedelta(weekday=res.weekday) + + return naive + + def _assign_tzname(self, dt, tzname): + if dt.tzname() != tzname: + new_dt = tz.enfold(dt, fold=1) + if new_dt.tzname() == tzname: + return new_dt + + return dt + + def _recombine_skipped(self, tokens, skipped_idxs): + """ + >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"] + >>> skipped_idxs = [0, 1, 2, 5] + >>> _recombine_skipped(tokens, skipped_idxs) + ["foo bar", "baz"] + """ + skipped_tokens = [] + for i, idx in enumerate(sorted(skipped_idxs)): + if i > 0 and idx - 1 == skipped_idxs[i - 1]: + skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx] + else: + skipped_tokens.append(tokens[idx]) + + return skipped_tokens + + +DEFAULTPARSER = parser() + + +def parse(timestr, parserinfo=None, **kwargs): + """ + + Parse a string in one of the supported formats, using the + ``parserinfo`` parameters. + + :param timestr: + A string containing a date/time stamp. + + :param parserinfo: + A :class:`parserinfo` object containing parameters for the parser. + If ``None``, the default arguments to the :class:`parserinfo` + constructor are used. 
+ + The ``**kwargs`` parameter takes the following keyword arguments: + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in seconds or a :class:`tzinfo` object. + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM and + YMD. If set to ``None``, this value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. 
If ``True``, the first number is taken to + be the year, otherwise the last number is taken to be the year. If + this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ParserError: + Raised for invalid or unknown string formats, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date would + be created. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. 
+ """ + if parserinfo: + return parser(parserinfo).parse(timestr, **kwargs) + else: + return DEFAULTPARSER.parse(timestr, **kwargs) + + +class _tzparser(object): + + class _result(_resultbase): + + __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", + "start", "end"] + + class _attr(_resultbase): + __slots__ = ["month", "week", "weekday", + "yday", "jyday", "day", "time"] + + def __repr__(self): + return self._repr("") + + def __init__(self): + _resultbase.__init__(self) + self.start = self._attr() + self.end = self._attr() + + def parse(self, tzstr): + res = self._result() + l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x] + used_idxs = list() + try: + + len_l = len(l) + + i = 0 + while i < len_l: + # BRST+3[BRDT[+2]] + j = i + while j < len_l and not [x for x in l[j] + if x in "0123456789:,-+"]: + j += 1 + if j != i: + if not res.stdabbr: + offattr = "stdoffset" + res.stdabbr = "".join(l[i:j]) + else: + offattr = "dstoffset" + res.dstabbr = "".join(l[i:j]) + + for ii in range(j): + used_idxs.append(ii) + i = j + if (i < len_l and (l[i] in ('+', '-') or l[i][0] in + "0123456789")): + if l[i] in ('+', '-'): + # Yes, that's right. See the TZ variable + # documentation. 
+ signal = (1, -1)[l[i] == '+'] + used_idxs.append(i) + i += 1 + else: + signal = -1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + setattr(res, offattr, (int(l[i][:2]) * 3600 + + int(l[i][2:]) * 60) * signal) + elif i + 1 < len_l and l[i + 1] == ':': + # -03:00 + setattr(res, offattr, + (int(l[i]) * 3600 + + int(l[i + 2]) * 60) * signal) + used_idxs.append(i) + i += 2 + elif len_li <= 2: + # -[0]3 + setattr(res, offattr, + int(l[i][:2]) * 3600 * signal) + else: + return None + used_idxs.append(i) + i += 1 + if res.dstabbr: + break + else: + break + + + if i < len_l: + for j in range(i, len_l): + if l[j] == ';': + l[j] = ',' + + assert l[i] == ',' + + i += 1 + + if i >= len_l: + pass + elif (8 <= l.count(',') <= 9 and + not [y for x in l[i:] if x != ',' + for y in x if y not in "0123456789+-"]): + # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] + for x in (res.start, res.end): + x.month = int(l[i]) + used_idxs.append(i) + i += 2 + if l[i] == '-': + value = int(l[i + 1]) * -1 + used_idxs.append(i) + i += 1 + else: + value = int(l[i]) + used_idxs.append(i) + i += 2 + if value: + x.week = value + x.weekday = (int(l[i]) - 1) % 7 + else: + x.day = int(l[i]) + used_idxs.append(i) + i += 2 + x.time = int(l[i]) + used_idxs.append(i) + i += 2 + if i < len_l: + if l[i] in ('-', '+'): + signal = (-1, 1)[l[i] == "+"] + used_idxs.append(i) + i += 1 + else: + signal = 1 + used_idxs.append(i) + res.dstoffset = (res.stdoffset + int(l[i]) * signal) + + # This was a made-up format that is not in normal use + warn(('Parsed time zone "%s"' % tzstr) + + 'is in a non-standard dateutil-specific format, which ' + + 'is now deprecated; support for parsing this format ' + + 'will be removed in future versions. 
It is recommended ' + + 'that you switch to a standard format like the GNU ' + + 'TZ variable format.', tz.DeprecatedTzFormatWarning) + elif (l.count(',') == 2 and l[i:].count('/') <= 2 and + not [y for x in l[i:] if x not in (',', '/', 'J', 'M', + '.', '-', ':') + for y in x if y not in "0123456789"]): + for x in (res.start, res.end): + if l[i] == 'J': + # non-leap year day (1 based) + used_idxs.append(i) + i += 1 + x.jyday = int(l[i]) + elif l[i] == 'M': + # month[-.]week[-.]weekday + used_idxs.append(i) + i += 1 + x.month = int(l[i]) + used_idxs.append(i) + i += 1 + assert l[i] in ('-', '.') + used_idxs.append(i) + i += 1 + x.week = int(l[i]) + if x.week == 5: + x.week = -1 + used_idxs.append(i) + i += 1 + assert l[i] in ('-', '.') + used_idxs.append(i) + i += 1 + x.weekday = (int(l[i]) - 1) % 7 + else: + # year day (zero based) + x.yday = int(l[i]) + 1 + + used_idxs.append(i) + i += 1 + + if i < len_l and l[i] == '/': + used_idxs.append(i) + i += 1 + # start time + len_li = len(l[i]) + if len_li == 4: + # -0300 + x.time = (int(l[i][:2]) * 3600 + + int(l[i][2:]) * 60) + elif i + 1 < len_l and l[i + 1] == ':': + # -03:00 + x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60 + used_idxs.append(i) + i += 2 + if i + 1 < len_l and l[i + 1] == ':': + used_idxs.append(i) + i += 2 + x.time += int(l[i]) + elif len_li <= 2: + # -[0]3 + x.time = (int(l[i][:2]) * 3600) + else: + return None + used_idxs.append(i) + i += 1 + + assert i == len_l or l[i] == ',' + + i += 1 + + assert i >= len_l + + except (IndexError, ValueError, AssertionError): + return None + + unused_idxs = set(range(len_l)).difference(used_idxs) + res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"}) + return res + + +DEFAULTTZPARSER = _tzparser() + + +def _parsetz(tzstr): + return DEFAULTTZPARSER.parse(tzstr) + + +class ParserError(ValueError): + """Exception subclass used for any failure to parse a datetime string. 
+ + This is a subclass of :py:exc:`ValueError`, and should be raised any time + earlier versions of ``dateutil`` would have raised ``ValueError``. + + .. versionadded:: 2.8.1 + """ + def __str__(self): + try: + return self.args[0] % self.args[1:] + except (TypeError, IndexError): + return super(ParserError, self).__str__() + + def __repr__(self): + args = ", ".join("'%s'" % arg for arg in self.args) + return "%s(%s)" % (self.__class__.__name__, args) + + +class UnknownTimezoneWarning(RuntimeWarning): + """Raised when the parser finds a timezone it cannot parse into a tzinfo. + + .. versionadded:: 2.7.0 + """ +# vim:ts=4:sw=4:et diff --git a/third_party/python/python_dateutil/dateutil/parser/isoparser.py b/third_party/python/python_dateutil/dateutil/parser/isoparser.py new file mode 100644 index 0000000000000..5d7bee38006d4 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/parser/isoparser.py @@ -0,0 +1,416 @@ +# -*- coding: utf-8 -*- +""" +This module offers a parser for ISO-8601 strings + +It is intended to support all valid date, time and datetime formats per the +ISO-8601 specification. 
+ +..versionadded:: 2.7.0 +""" +from datetime import datetime, timedelta, time, date +import calendar +from dateutil import tz + +from functools import wraps + +import re +import six + +__all__ = ["isoparse", "isoparser"] + + +def _takes_ascii(f): + @wraps(f) + def func(self, str_in, *args, **kwargs): + # If it's a stream, read the whole thing + str_in = getattr(str_in, 'read', lambda: str_in)() + + # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII + if isinstance(str_in, six.text_type): + # ASCII is the same in UTF-8 + try: + str_in = str_in.encode('ascii') + except UnicodeEncodeError as e: + msg = 'ISO-8601 strings should contain only ASCII characters' + six.raise_from(ValueError(msg), e) + + return f(self, str_in, *args, **kwargs) + + return func + + +class isoparser(object): + def __init__(self, sep=None): + """ + :param sep: + A single character that separates date and time portions. If + ``None``, the parser will accept any single character. + For strict ISO-8601 adherence, pass ``'T'``. + """ + if sep is not None: + if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): + raise ValueError('Separator must be a single, non-numeric ' + + 'ASCII character') + + sep = sep.encode('ascii') + + self._sep = sep + + @_takes_ascii + def isoparse(self, dt_str): + """ + Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. + + An ISO-8601 datetime string consists of a date portion, followed + optionally by a time portion - the date and time portions are separated + by a single character separator, which is ``T`` in the official + standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be + combined with a time portion. 
+ + Supported date formats are: + + Common: + + - ``YYYY`` + - ``YYYY-MM`` or ``YYYYMM`` + - ``YYYY-MM-DD`` or ``YYYYMMDD`` + + Uncommon: + + - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0) + - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day + + The ISO week and day numbering follows the same logic as + :func:`datetime.date.isocalendar`. + + Supported time formats are: + + - ``hh`` + - ``hh:mm`` or ``hhmm`` + - ``hh:mm:ss`` or ``hhmmss`` + - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits) + + Midnight is a special case for `hh`, as the standard supports both + 00:00 and 24:00 as a representation. The decimal separator can be + either a dot or a comma. + + + .. caution:: + + Support for fractional components other than seconds is part of the + ISO-8601 standard, but is not currently implemented in this parser. + + Supported time zone offset formats are: + + - `Z` (UTC) + - `±HH:MM` + - `±HHMM` + - `±HH` + + Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, + with the exception of UTC, which will be represented as + :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such + as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. + + :param dt_str: + A string or stream containing only an ISO-8601 datetime string + + :return: + Returns a :class:`datetime.datetime` representing the string. + Unspecified components default to their lowest value. + + .. warning:: + + As of version 2.7.0, the strictness of the parser should not be + considered a stable part of the contract. Any valid ISO-8601 string + that parses correctly with the default settings will continue to + parse correctly in future versions, but invalid strings that + currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not + guaranteed to continue failing in future versions if they encode + a valid date. + + .. 
versionadded:: 2.7.0 + """ + components, pos = self._parse_isodate(dt_str) + + if len(dt_str) > pos: + if self._sep is None or dt_str[pos:pos + 1] == self._sep: + components += self._parse_isotime(dt_str[pos + 1:]) + else: + raise ValueError('String contains unknown ISO components') + + if len(components) > 3 and components[3] == 24: + components[3] = 0 + return datetime(*components) + timedelta(days=1) + + return datetime(*components) + + @_takes_ascii + def parse_isodate(self, datestr): + """ + Parse the date portion of an ISO string. + + :param datestr: + The string portion of an ISO string, without a separator + + :return: + Returns a :class:`datetime.date` object + """ + components, pos = self._parse_isodate(datestr) + if pos < len(datestr): + raise ValueError('String contains unknown ISO ' + + 'components: {!r}'.format(datestr.decode('ascii'))) + return date(*components) + + @_takes_ascii + def parse_isotime(self, timestr): + """ + Parse the time portion of an ISO string. + + :param timestr: + The time portion of an ISO string, without a separator + + :return: + Returns a :class:`datetime.time` object + """ + components = self._parse_isotime(timestr) + if components[0] == 24: + components[0] = 0 + return time(*components) + + @_takes_ascii + def parse_tzstr(self, tzstr, zero_as_utc=True): + """ + Parse a valid ISO time zone string. + + See :func:`isoparser.isoparse` for details on supported formats. + + :param tzstr: + A string representing an ISO time zone offset + + :param zero_as_utc: + Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones + + :return: + Returns :class:`dateutil.tz.tzoffset` for offsets and + :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is + specified) offsets equivalent to UTC. 
+ """ + return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) + + # Constants + _DATE_SEP = b'-' + _TIME_SEP = b':' + _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)') + + def _parse_isodate(self, dt_str): + try: + return self._parse_isodate_common(dt_str) + except ValueError: + return self._parse_isodate_uncommon(dt_str) + + def _parse_isodate_common(self, dt_str): + len_str = len(dt_str) + components = [1, 1, 1] + + if len_str < 4: + raise ValueError('ISO string too short') + + # Year + components[0] = int(dt_str[0:4]) + pos = 4 + if pos >= len_str: + return components, pos + + has_sep = dt_str[pos:pos + 1] == self._DATE_SEP + if has_sep: + pos += 1 + + # Month + if len_str - pos < 2: + raise ValueError('Invalid common month') + + components[1] = int(dt_str[pos:pos + 2]) + pos += 2 + + if pos >= len_str: + if has_sep: + return components, pos + else: + raise ValueError('Invalid ISO format') + + if has_sep: + if dt_str[pos:pos + 1] != self._DATE_SEP: + raise ValueError('Invalid separator in ISO string') + pos += 1 + + # Day + if len_str - pos < 2: + raise ValueError('Invalid common day') + components[2] = int(dt_str[pos:pos + 2]) + return components, pos + 2 + + def _parse_isodate_uncommon(self, dt_str): + if len(dt_str) < 4: + raise ValueError('ISO string too short') + + # All ISO formats start with the year + year = int(dt_str[0:4]) + + has_sep = dt_str[4:5] == self._DATE_SEP + + pos = 4 + has_sep # Skip '-' if it's there + if dt_str[pos:pos + 1] == b'W': + # YYYY-?Www-?D? 
+ pos += 1 + weekno = int(dt_str[pos:pos + 2]) + pos += 2 + + dayno = 1 + if len(dt_str) > pos: + if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: + raise ValueError('Inconsistent use of dash separator') + + pos += has_sep + + dayno = int(dt_str[pos:pos + 1]) + pos += 1 + + base_date = self._calculate_weekdate(year, weekno, dayno) + else: + # YYYYDDD or YYYY-DDD + if len(dt_str) - pos < 3: + raise ValueError('Invalid ordinal day') + + ordinal_day = int(dt_str[pos:pos + 3]) + pos += 3 + + if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): + raise ValueError('Invalid ordinal day' + + ' {} for year {}'.format(ordinal_day, year)) + + base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) + + components = [base_date.year, base_date.month, base_date.day] + return components, pos + + def _calculate_weekdate(self, year, week, day): + """ + Calculate the day of corresponding to the ISO year-week-day calendar. + + This function is effectively the inverse of + :func:`datetime.date.isocalendar`. 
+ + :param year: + The year in the ISO calendar + + :param week: + The week in the ISO calendar - range is [1, 53] + + :param day: + The day in the ISO calendar - range is [1 (MON), 7 (SUN)] + + :return: + Returns a :class:`datetime.date` + """ + if not 0 < week < 54: + raise ValueError('Invalid week: {}'.format(week)) + + if not 0 < day < 8: # Range is 1-7 + raise ValueError('Invalid weekday: {}'.format(day)) + + # Get week 1 for the specific year: + jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it + week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) + + # Now add the specific number of weeks and days to get what we want + week_offset = (week - 1) * 7 + (day - 1) + return week_1 + timedelta(days=week_offset) + + def _parse_isotime(self, timestr): + len_str = len(timestr) + components = [0, 0, 0, 0, None] + pos = 0 + comp = -1 + + if len_str < 2: + raise ValueError('ISO time too short') + + has_sep = False + + while pos < len_str and comp < 5: + comp += 1 + + if timestr[pos:pos + 1] in b'-+Zz': + # Detect time zone boundary + components[-1] = self._parse_tzstr(timestr[pos:]) + pos = len_str + break + + if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP: + has_sep = True + pos += 1 + elif comp == 2 and has_sep: + if timestr[pos:pos+1] != self._TIME_SEP: + raise ValueError('Inconsistent use of colon separator') + pos += 1 + + if comp < 3: + # Hour, minute, second + components[comp] = int(timestr[pos:pos + 2]) + pos += 2 + + if comp == 3: + # Fraction of a second + frac = self._FRACTION_REGEX.match(timestr[pos:]) + if not frac: + continue + + us_str = frac.group(1)[:6] # Truncate to microseconds + components[comp] = int(us_str) * 10**(6 - len(us_str)) + pos += len(frac.group()) + + if pos < len_str: + raise ValueError('Unused components in ISO string') + + if components[0] == 24: + # Standard supports 00:00 and 24:00 as representations of midnight + if any(component != 0 for component in components[1:4]): + raise ValueError('Hour may only be 
24 at 24:00:00.000') + + return components + + def _parse_tzstr(self, tzstr, zero_as_utc=True): + if tzstr == b'Z' or tzstr == b'z': + return tz.UTC + + if len(tzstr) not in {3, 5, 6}: + raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') + + if tzstr[0:1] == b'-': + mult = -1 + elif tzstr[0:1] == b'+': + mult = 1 + else: + raise ValueError('Time zone offset requires sign') + + hours = int(tzstr[1:3]) + if len(tzstr) == 3: + minutes = 0 + else: + minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):]) + + if zero_as_utc and hours == 0 and minutes == 0: + return tz.UTC + else: + if minutes > 59: + raise ValueError('Invalid minutes in time zone offset') + + if hours > 23: + raise ValueError('Invalid hours in time zone offset') + + return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) + + +DEFAULT_ISOPARSER = isoparser() +isoparse = DEFAULT_ISOPARSER.isoparse diff --git a/third_party/python/python_dateutil/dateutil/relativedelta.py b/third_party/python/python_dateutil/dateutil/relativedelta.py new file mode 100644 index 0000000000000..a9e85f7e6cd74 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/relativedelta.py @@ -0,0 +1,599 @@ +# -*- coding: utf-8 -*- +import datetime +import calendar + +import operator +from math import copysign + +from six import integer_types +from warnings import warn + +from ._common import weekday + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + + +class relativedelta(object): + """ + The relativedelta type is designed to be applied to an existing datetime and + can replace specific components of that datetime, or represents an interval + of time. + + It is based on the specification of the excellent work done by M.-A. Lemburg + in his + `mx.DateTime `_ extension. + However, notice that this type does *NOT* implement the same algorithm as + his work. 
Do *NOT* expect it to behave like mx.DateTime's counterpart. + + There are two different ways to build a relativedelta instance. The + first one is passing it two date/datetime classes:: + + relativedelta(datetime1, datetime2) + + The second one is passing it any number of the following keyword arguments:: + + relativedelta(arg1=x,arg2=y,arg3=z...) + + year, month, day, hour, minute, second, microsecond: + Absolute information (argument is singular); adding or subtracting a + relativedelta with absolute information does not perform an arithmetic + operation, but rather REPLACES the corresponding value in the + original datetime with the value(s) in relativedelta. + + years, months, weeks, days, hours, minutes, seconds, microseconds: + Relative information, may be negative (argument is plural); adding + or subtracting a relativedelta with relative information performs + the corresponding arithmetic operation on the original datetime value + with the information in the relativedelta. + + weekday: + One of the weekday instances (MO, TU, etc) available in the + relativedelta module. These instances may receive a parameter N, + specifying the Nth weekday, which could be positive or negative + (like MO(+1) or MO(-2)). Not specifying it is the same as specifying + +1. You can also use an integer, where 0=MO. This argument is always + relative e.g. if the calculated date is already Monday, using MO(1) + or MO(-1) won't change the day. To effectively make it absolute, use + it in combination with the day argument (e.g. day=1, MO(1) for first + Monday of the month). + + leapdays: + Will add given days to the date found, if year is a leap + year, and the date found is post 28 of february. + + yearday, nlyearday: + Set the yearday or the non-leap year day (jump leap days). + These are converted to day/month/leapdays information. + + There are relative and absolute forms of the keyword + arguments. The plural is relative, and the singular is + absolute. 
For each argument in the order below, the absolute form + is applied first (by setting each attribute to that value) and + then the relative form (by adding the value to the attribute). + + The order of attributes considered when this relativedelta is + added to a datetime is: + + 1. Year + 2. Month + 3. Day + 4. Hours + 5. Minutes + 6. Seconds + 7. Microseconds + + Finally, weekday is applied, using the rule described above. + + For example + + >>> from datetime import datetime + >>> from dateutil.relativedelta import relativedelta, MO + >>> dt = datetime(2018, 4, 9, 13, 37, 0) + >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) + >>> dt + delta + datetime.datetime(2018, 4, 2, 14, 37) + + First, the day is set to 1 (the first of the month), then 25 hours + are added, to get to the 2nd day and 14th hour, finally the + weekday is applied, but since the 2nd is already a Monday there is + no effect. + + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + + if dt1 and dt2: + # datetime is a subclass of date. 
So both must be date + if not (isinstance(dt1, datetime.date) and + isinstance(dt2, datetime.date)): + raise TypeError("relativedelta only diffs datetime/date") + + # We allow two dates, or two datetimes, so we coerce them to be + # of the same type + if (isinstance(dt1, datetime.datetime) != + isinstance(dt2, datetime.datetime)): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + # Get year / month delta between the two + months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) + self._set_months(months) + + # Remove the year/month delta so the timedelta is just well-defined + # time units (seconds, days and microseconds) + dtm = self.__radd__(dt2) + + # If we've overshot our target, make an adjustment + if dt1 < dt2: + compare = operator.gt + increment = 1 + else: + compare = operator.lt + increment = -1 + + while compare(dt1, dtm): + months += increment + self._set_months(months) + dtm = self.__radd__(dt2) + + # Get the timedelta between the "months-adjusted" date and dt1 + delta = dt1 - dtm + self.seconds = delta.seconds + delta.days * 86400 + self.microseconds = delta.microseconds + else: + # Check for non-integer values in integer-only quantities + if any(x is not None and x != int(x) for x in (years, months)): + raise ValueError("Non-integer years and months are " + "ambiguous and not currently supported.") + + # Relative information + self.years = int(years) + self.months = int(months) + self.days = days + weeks * 7 + self.leapdays = leapdays + self.hours = hours + 
self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + + # Absolute information + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if any(x is not None and int(x) != x + for x in (year, month, day, hour, + minute, second, microsecond)): + # For now we'll deprecate floats - later it'll be an error. + warn("Non-integer value passed as absolute information. " + + "This is not a well-defined condition and will raise " + + "errors in future versions.", DeprecationWarning) + + if isinstance(weekday, integer_types): + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31, 59, 90, 120, 151, 181, 212, + 243, 273, 304, 334, 366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError("invalid year day (%d)" % yday) + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = _sign(self.microseconds) + div, mod = divmod(self.microseconds * s, 1000000) + self.microseconds = mod * s + self.seconds += div * s + if abs(self.seconds) > 59: + s = _sign(self.seconds) + div, mod = divmod(self.seconds * s, 60) + self.seconds = mod * s + self.minutes += div * s + if abs(self.minutes) > 59: + s = _sign(self.minutes) + div, mod = divmod(self.minutes * s, 60) + self.minutes = mod * s + self.hours += div * s + if abs(self.hours) > 23: + s = _sign(self.hours) + div, mod = divmod(self.hours * s, 24) + self.hours = mod * s + self.days += div * s + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years += div * s + if (self.hours or self.minutes or self.seconds or 
self.microseconds + or self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + @property + def weeks(self): + return int(self.days / 7.0) + + @weeks.setter + def weeks(self, value): + self.days = self.days - (self.weeks * 7) + value * 7 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years = div * s + else: + self.years = 0 + + def normalized(self): + """ + Return a version of this object represented entirely using integer + values for the relative attributes. + + >>> relativedelta(days=1.5, hours=2).normalized() + relativedelta(days=+1, hours=+14) + + :return: + Returns a :class:`dateutil.relativedelta.relativedelta` object. + """ + # Cascade remainders down (rounding each to roughly nearest microsecond) + days = int(self.days) + + hours_f = round(self.hours + 24 * (self.days - days), 11) + hours = int(hours_f) + + minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) + minutes = int(minutes_f) + + seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) + seconds = int(seconds_f) + + microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) + + # Constructor carries overflow back up with call to _fix() + return self.__class__(years=self.years, months=self.months, + days=days, hours=hours, minutes=minutes, + seconds=seconds, microseconds=microseconds, + leapdays=self.leapdays, year=self.year, + month=self.month, day=self.day, + weekday=self.weekday, hour=self.hour, + minute=self.minute, second=self.second, + microsecond=self.microsecond) + + def __add__(self, other): + if isinstance(other, relativedelta): + return self.__class__(years=other.years + self.years, + months=other.months + self.months, + days=other.days + self.days, + hours=other.hours + self.hours, + minutes=other.minutes + 
self.minutes, + seconds=other.seconds + self.seconds, + microseconds=(other.microseconds + + self.microseconds), + leapdays=other.leapdays or self.leapdays, + year=(other.year if other.year is not None + else self.year), + month=(other.month if other.month is not None + else self.month), + day=(other.day if other.day is not None + else self.day), + weekday=(other.weekday if other.weekday is not None + else self.weekday), + hour=(other.hour if other.hour is not None + else self.hour), + minute=(other.minute if other.minute is not None + else self.minute), + second=(other.second if other.second is not None + else self.second), + microsecond=(other.microsecond if other.microsecond + is not None else + self.microsecond)) + if isinstance(other, datetime.timedelta): + return self.__class__(years=self.years, + months=self.months, + days=self.days + other.days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds + other.seconds, + microseconds=self.microseconds + other.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + if not isinstance(other, datetime.date): + return NotImplemented + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += 
self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth) - 1) * 7 + if nth > 0: + jumpdays += (7 - ret.weekday() + weekday) % 7 + else: + jumpdays += (ret.weekday() - weekday) % 7 + jumpdays *= -1 + ret += datetime.timedelta(days=jumpdays) + return ret + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented # In case the other object defines __rsub__ + return self.__class__(years=self.years - other.years, + months=self.months - other.months, + days=self.days - other.days, + hours=self.hours - other.hours, + minutes=self.minutes - other.minutes, + seconds=self.seconds - other.seconds, + microseconds=self.microseconds - other.microseconds, + leapdays=self.leapdays or other.leapdays, + year=(self.year if self.year is not None + else other.year), + month=(self.month if self.month is not None else + other.month), + day=(self.day if self.day is not None else + other.day), + weekday=(self.weekday if self.weekday is not None else + other.weekday), + hour=(self.hour if self.hour is not None else + other.hour), + minute=(self.minute if self.minute is not None else + other.minute), + second=(self.second if self.second is not None else + other.second), + microsecond=(self.microsecond if self.microsecond + is not None else + other.microsecond)) + + def __abs__(self): + return self.__class__(years=abs(self.years), + months=abs(self.months), + days=abs(self.days), + hours=abs(self.hours), + minutes=abs(self.minutes), + seconds=abs(self.seconds), + microseconds=abs(self.microseconds), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + 
hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __neg__(self): + return self.__class__(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __bool__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + # Compatibility with Python 2.x + __nonzero__ = __bool__ + + def __mul__(self, other): + try: + f = float(other) + except TypeError: + return NotImplemented + + return self.__class__(years=int(self.years * f), + months=int(self.months * f), + days=int(self.days * f), + hours=int(self.hours * f), + minutes=int(self.minutes * f), + seconds=int(self.seconds * f), + microseconds=int(self.microseconds * f), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + __rmul__ = __mul__ + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + self.months == 
other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.microseconds == other.microseconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + def __hash__(self): + return hash(( + self.weekday, + self.years, + self.months, + self.days, + self.hours, + self.minutes, + self.seconds, + self.microseconds, + self.leapdays, + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + )) + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + try: + reciprocal = 1 / float(other) + except TypeError: + return NotImplemented + + return self.__mul__(reciprocal) + + __truediv__ = __div__ + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("{attr}={value:+g}".format(attr=attr, value=value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("{attr}={value}".format(attr=attr, value=repr(value))) + return "{classname}({attrs})".format(classname=self.__class__.__name__, + attrs=", ".join(l)) + + +def _sign(x): + return int(copysign(1, x)) + +# vim:ts=4:sw=4:et diff --git a/third_party/python/python_dateutil/dateutil/rrule.py b/third_party/python/python_dateutil/dateutil/rrule.py new file mode 100644 index 0000000000000..b3203393c6120 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/rrule.py @@ -0,0 +1,1737 @@ +# -*- coding: utf-8 -*- +""" +The rrule module offers a small, complete, and very fast, implementation of +the 
recurrence rules documented in the +`iCalendar RFC `_, +including support for caching of results. +""" +import calendar +import datetime +import heapq +import itertools +import re +import sys +from functools import wraps +# For warning about deprecation of until and count +from warnings import warn + +from six import advance_iterator, integer_types + +from six.moves import _thread, range + +from ._common import weekday as weekdaybase + +try: + from math import gcd +except ImportError: + from fractions import gcd + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. +M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) +WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY'] + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = list(range(7)) + +# Imported on demand. +easter = None +parser = None + + +class weekday(weekdaybase): + """ + This version of weekday does not allow n = 0. 
+ """ + def __init__(self, wkday, n=None): + if n == 0: + raise ValueError("Can't create weekday with n==0") + + super(weekday, self).__init__(wkday, n) + + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + + +def _invalidates_cache(f): + """ + Decorator for rruleset methods which may invalidate the + cached length. + """ + @wraps(f) + def inner_func(self, *args, **kwargs): + rv = f(self, *args, **kwargs) + self._invalidate_cache() + return rv + + return inner_func + + +class rrulebase(object): + def __init__(self, cache=False): + if cache: + self._cache = [] + self._cache_lock = _thread.allocate_lock() + self._invalidate_cache() + else: + self._cache = None + self._cache_complete = False + self._len = None + + def __iter__(self): + if self._cache_complete: + return iter(self._cache) + elif self._cache is None: + return self._iter() + else: + return self._iter_cached() + + def _invalidate_cache(self): + if self._cache is not None: + self._cache = [] + self._cache_complete = False + self._cache_gen = self._iter() + + if self._cache_lock.locked(): + self._cache_lock.release() + + self._len = None + + def _iter_cached(self): + i = 0 + gen = self._cache_gen + cache = self._cache + acquire = self._cache_lock.acquire + release = self._cache_lock.release + while gen: + if i == len(cache): + acquire() + if self._cache_complete: + break + try: + for j in range(10): + cache.append(advance_iterator(gen)) + except StopIteration: + self._cache_gen = gen = None + self._cache_complete = True + break + release() + yield cache[i] + i += 1 + while i < self._len: + yield cache[i] + i += 1 + + def __getitem__(self, item): + if self._cache_complete: + return self._cache[item] + elif isinstance(item, slice): + if item.step and item.step < 0: + return list(iter(self))[item] + else: + return list(itertools.islice(self, + item.start or 0, + item.stop or sys.maxsize, + item.step or 1)) + elif item >= 0: + gen = iter(self) + try: + for i in range(item+1): + res = 
advance_iterator(gen) + except StopIteration: + raise IndexError + return res + else: + return list(iter(self))[item] + + def __contains__(self, item): + if self._cache_complete: + return item in self._cache + else: + for i in self: + if i == item: + return True + elif i > item: + return False + return False + + # __len__() introduces a large performance penalty. + def count(self): + """ Returns the number of recurrences in this set. It will have go + trough the whole recurrence, if this hasn't been done before. """ + if self._len is None: + for x in self: + pass + return self._len + + def before(self, dt, inc=False): + """ Returns the last recurrence before the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + last = None + if inc: + for i in gen: + if i > dt: + break + last = i + else: + for i in gen: + if i >= dt: + break + last = i + return last + + def after(self, dt, inc=False): + """ Returns the first recurrence after the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + if inc: + for i in gen: + if i >= dt: + return i + else: + for i in gen: + if i > dt: + return i + return None + + def xafter(self, dt, count=None, inc=False): + """ + Generator which yields up to `count` recurrences after the given + datetime instance, equivalent to `after`. + + :param dt: + The datetime at which to start generating recurrences. + + :param count: + The maximum number of recurrences to generate. If `None` (default), + dates are generated until the recurrence rule is exhausted. + + :param inc: + If `dt` is an instance of the rule and `inc` is `True`, it is + included in the output. 
+ + :yields: Yields a sequence of `datetime` objects. + """ + + if self._cache_complete: + gen = self._cache + else: + gen = self + + # Select the comparison function + if inc: + comp = lambda dc, dtc: dc >= dtc + else: + comp = lambda dc, dtc: dc > dtc + + # Generate dates + n = 0 + for d in gen: + if comp(d, dt): + if count is not None: + n += 1 + if n > count: + break + + yield d + + def between(self, after, before, inc=False, count=1): + """ Returns all the occurrences of the rrule between after and before. + The inc keyword defines what happens if after and/or before are + themselves occurrences. With inc=True, they will be included in the + list, if they are found in the recurrence set. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + + +class rrule(rrulebase): + """ + That's the base of the rrule operation. It accepts all the keywords + defined in the RFC as its constructor parameters (except byday, + which was renamed to byweekday) and more. The constructor prototype is:: + + rrule(freq) + + Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, + or SECONDLY. + + .. note:: + Per RFC section 3.3.10, recurrence instances falling on invalid dates + and times are ignored rather than coerced: + + Recurrence rules may generate recurrence instances with an invalid + date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM + on a day where the local time is moved forward by an hour at 1:00 + AM). Such recurrence instances MUST be ignored and MUST NOT be + counted as part of the recurrence set. 
+ + This can lead to possibly surprising behavior when, for example, the + start date occurs at the end of the month: + + >>> from dateutil.rrule import rrule, MONTHLY + >>> from datetime import datetime + >>> start_date = datetime(2014, 12, 31) + >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) + ... # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2014, 12, 31, 0, 0), + datetime.datetime(2015, 1, 31, 0, 0), + datetime.datetime(2015, 3, 31, 0, 0), + datetime.datetime(2015, 5, 31, 0, 0)] + + Additionally, it supports the following keyword arguments: + + :param dtstart: + The recurrence start. Besides being the base for the recurrence, + missing parameters in the final recurrence instances will also be + extracted from this date. If not given, datetime.now() will be used + instead. + :param interval: + The interval between each freq iteration. For example, when using + YEARLY, an interval of 2 means once every two years, but with HOURLY, + it means once every two hours. The default interval is 1. + :param wkst: + The week start day. Must be one of the MO, TU, WE constants, or an + integer, specifying the first day of the week. This will affect + recurrences based on weekly periods. The default week start is got + from calendar.firstweekday(), and may be modified by + calendar.setfirstweekday(). + :param count: + If given, this determines how many occurrences will be generated. + + .. note:: + As of version 2.5.0, the use of the keyword ``until`` in conjunction + with ``count`` is deprecated, to make sure ``dateutil`` is fully + compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` + **must not** occur in the same call to ``rrule``. + :param until: + If given, this must be a datetime instance specifying the upper-bound + limit of the recurrence. The last recurrence in the rule is the greatest + datetime that is less than or equal to the value specified in the + ``until`` parameter. + + .. 
note:: + As of version 2.5.0, the use of the keyword ``until`` in conjunction + with ``count`` is deprecated, to make sure ``dateutil`` is fully + compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` + **must not** occur in the same call to ``rrule``. + :param bysetpos: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each given integer will specify an occurrence + number, corresponding to the nth occurrence of the rule inside the + frequency period. For example, a bysetpos of -1 if combined with a + MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will + result in the last work day of every month. + :param bymonth: + If given, it must be either an integer, or a sequence of integers, + meaning the months to apply the recurrence to. + :param bymonthday: + If given, it must be either an integer, or a sequence of integers, + meaning the month days to apply the recurrence to. + :param byyearday: + If given, it must be either an integer, or a sequence of integers, + meaning the year days to apply the recurrence to. + :param byeaster: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each integer will define an offset from the + Easter Sunday. Passing the offset 0 to byeaster will yield the Easter + Sunday itself. This is an extension to the RFC specification. + :param byweekno: + If given, it must be either an integer, or a sequence of integers, + meaning the week numbers to apply the recurrence to. Week numbers + have the meaning described in ISO8601, that is, the first week of + the year is that containing at least four days of the new year. + :param byweekday: + If given, it must be either an integer (0 == MO), a sequence of + integers, one of the weekday constants (MO, TU, etc), or a sequence + of these constants. When given, these variables will define the + weekdays where the recurrence will be applied. 
It's also possible to + use an argument n for the weekday instances, which will mean the nth + occurrence of this weekday in the period. For example, with MONTHLY, + or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the + first friday of the month where the recurrence happens. Notice that in + the RFC documentation, this is specified as BYDAY, but was renamed to + avoid the ambiguity of that keyword. + :param byhour: + If given, it must be either an integer, or a sequence of integers, + meaning the hours to apply the recurrence to. + :param byminute: + If given, it must be either an integer, or a sequence of integers, + meaning the minutes to apply the recurrence to. + :param bysecond: + If given, it must be either an integer, or a sequence of integers, + meaning the seconds to apply the recurrence to. + :param cache: + If given, it must be a boolean value specifying to enable or disable + caching of results. If you will use the same rrule instance multiple + times, enabling caching will improve the performance considerably. 
+ """ + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + super(rrule, self).__init__(cache) + global easter + if not dtstart: + if until and until.tzinfo: + dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0) + else: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + + # Cache the original byxxx rules, if they are provided, as the _byxxx + # attributes do not necessarily map to the inputs, and this can be + # a problem in generating the strings. Only store things if they've + # been supplied (the string retrieval will just use .get()) + self._original_rule = {} + + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + + if self._dtstart and self._until: + if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None): + # According to RFC5545 Section 3.3.10: + # https://tools.ietf.org/html/rfc5545#section-3.3.10 + # + # > If the "DTSTART" property is specified as a date with UTC + # > time or a date with local time and time zone reference, + # > then the UNTIL rule part MUST be specified as a date with + # > UTC time. + raise ValueError( + 'RRULE UNTIL values must be specified in UTC when DTSTART ' + 'is timezone-aware' + ) + + if count is not None and until: + warn("Using both 'count' and 'until' is inconsistent with RFC 5545" + " and has been deprecated in dateutil. 
Future versions will " + "raise an error.", DeprecationWarning) + + if wkst is None: + self._wkst = calendar.firstweekday() + elif isinstance(wkst, integer_types): + self._wkst = wkst + else: + self._wkst = wkst.weekday + + if bysetpos is None: + self._bysetpos = None + elif isinstance(bysetpos, integer_types): + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + + if self._bysetpos: + self._original_rule['bysetpos'] = self._bysetpos + + if (byweekno is None and byyearday is None and bymonthday is None and + byweekday is None and byeaster is None): + if freq == YEARLY: + if bymonth is None: + bymonth = dtstart.month + self._original_rule['bymonth'] = None + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == MONTHLY: + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == WEEKLY: + byweekday = dtstart.weekday() + self._original_rule['byweekday'] = None + + # bymonth + if bymonth is None: + self._bymonth = None + else: + if isinstance(bymonth, integer_types): + bymonth = (bymonth,) + + self._bymonth = tuple(sorted(set(bymonth))) + + if 'bymonth' not in self._original_rule: + self._original_rule['bymonth'] = self._bymonth + + # byyearday + if byyearday is None: + self._byyearday = None + else: + if isinstance(byyearday, integer_types): + byyearday = (byyearday,) + + self._byyearday = tuple(sorted(set(byyearday))) + self._original_rule['byyearday'] = self._byyearday + + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if isinstance(byeaster, integer_types): + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(sorted(byeaster)) + + 
self._original_rule['byeaster'] = self._byeaster + else: + self._byeaster = None + + # bymonthday + if bymonthday is None: + self._bymonthday = () + self._bynmonthday = () + else: + if isinstance(bymonthday, integer_types): + bymonthday = (bymonthday,) + + bymonthday = set(bymonthday) # Ensure it's unique + + self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) + self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) + + # Storing positive numbers first, then negative numbers + if 'bymonthday' not in self._original_rule: + self._original_rule['bymonthday'] = tuple( + itertools.chain(self._bymonthday, self._bynmonthday)) + + # byweekno + if byweekno is None: + self._byweekno = None + else: + if isinstance(byweekno, integer_types): + byweekno = (byweekno,) + + self._byweekno = tuple(sorted(set(byweekno))) + + self._original_rule['byweekno'] = self._byweekno + + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + else: + # If it's one of the valid non-sequence types, convert to a + # single-element sequence before the iterator that builds the + # byweekday set. 
+ if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): + byweekday = (byweekday,) + + self._byweekday = set() + self._bynweekday = set() + for wday in byweekday: + if isinstance(wday, integer_types): + self._byweekday.add(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.add(wday.weekday) + else: + self._bynweekday.add((wday.weekday, wday.n)) + + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + + if self._byweekday is not None: + self._byweekday = tuple(sorted(self._byweekday)) + orig_byweekday = [weekday(x) for x in self._byweekday] + else: + orig_byweekday = () + + if self._bynweekday is not None: + self._bynweekday = tuple(sorted(self._bynweekday)) + orig_bynweekday = [weekday(*x) for x in self._bynweekday] + else: + orig_bynweekday = () + + if 'byweekday' not in self._original_rule: + self._original_rule['byweekday'] = tuple(itertools.chain( + orig_byweekday, orig_bynweekday)) + + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = {dtstart.hour} + else: + self._byhour = None + else: + if isinstance(byhour, integer_types): + byhour = (byhour,) + + if freq == HOURLY: + self._byhour = self.__construct_byset(start=dtstart.hour, + byxxx=byhour, + base=24) + else: + self._byhour = set(byhour) + + self._byhour = tuple(sorted(self._byhour)) + self._original_rule['byhour'] = self._byhour + + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = {dtstart.minute} + else: + self._byminute = None + else: + if isinstance(byminute, integer_types): + byminute = (byminute,) + + if freq == MINUTELY: + self._byminute = self.__construct_byset(start=dtstart.minute, + byxxx=byminute, + base=60) + else: + self._byminute = set(byminute) + + self._byminute = tuple(sorted(self._byminute)) + self._original_rule['byminute'] = self._byminute + + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = ((dtstart.second,)) + else: + self._bysecond = None + 
else: + if isinstance(bysecond, integer_types): + bysecond = (bysecond,) + + self._bysecond = set(bysecond) + + if freq == SECONDLY: + self._bysecond = self.__construct_byset(start=dtstart.second, + byxxx=bysecond, + base=60) + else: + self._bysecond = set(bysecond) + + self._bysecond = tuple(sorted(self._bysecond)) + self._original_rule['bysecond'] = self._bysecond + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = tuple(self._timeset) + + def __str__(self): + """ + Output a string that would generate this RRULE if passed to rrulestr. + This is mostly compatible with RFC5545, except for the + dateutil-specific extension BYEASTER. + """ + + output = [] + h, m, s = [None] * 3 + if self._dtstart: + output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) + h, m, s = self._dtstart.timetuple()[3:6] + + parts = ['FREQ=' + FREQNAMES[self._freq]] + if self._interval != 1: + parts.append('INTERVAL=' + str(self._interval)) + + if self._wkst: + parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) + + if self._count is not None: + parts.append('COUNT=' + str(self._count)) + + if self._until: + parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) + + if self._original_rule.get('byweekday') is not None: + # The str() method on weekday objects doesn't generate + # RFC5545-compliant strings, so we should modify that. 
+ original_rule = dict(self._original_rule) + wday_strings = [] + for wday in original_rule['byweekday']: + if wday.n: + wday_strings.append('{n:+d}{wday}'.format( + n=wday.n, + wday=repr(wday)[0:2])) + else: + wday_strings.append(repr(wday)) + + original_rule['byweekday'] = wday_strings + else: + original_rule = self._original_rule + + partfmt = '{name}={vals}' + for name, key in [('BYSETPOS', 'bysetpos'), + ('BYMONTH', 'bymonth'), + ('BYMONTHDAY', 'bymonthday'), + ('BYYEARDAY', 'byyearday'), + ('BYWEEKNO', 'byweekno'), + ('BYDAY', 'byweekday'), + ('BYHOUR', 'byhour'), + ('BYMINUTE', 'byminute'), + ('BYSECOND', 'bysecond'), + ('BYEASTER', 'byeaster')]: + value = original_rule.get(key) + if value: + parts.append(partfmt.format(name=name, vals=(','.join(str(v) + for v in value)))) + + output.append('RRULE:' + ';'.join(parts)) + return '\n'.join(output) + + def replace(self, **kwargs): + """Return new rrule with same attributes except for those attributes given new + values by whichever keyword arguments are specified.""" + new_kwargs = {"interval": self._interval, + "count": self._count, + "dtstart": self._dtstart, + "freq": self._freq, + "until": self._until, + "wkst": self._wkst, + "cache": False if self._cache is None else True } + new_kwargs.update(self._original_rule) + new_kwargs.update(kwargs) + return rrule(**new_kwargs) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY: 
ii.ydayset, + MONTHLY: ii.mdayset, + WEEKLY: ii.wdayset, + DAILY: ii.ddayset, + HOURLY: ii.ddayset, + MINUTELY: ii.ddayset, + SECONDLY: ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY: ii.htimeset, + MINUTELY: ii.mtimeset, + SECONDLY: ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday and + -ii.yearlen+i not in byyearday) or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and + -ii.nextyearlen+i-ii.yearlen not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res 
>= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + total += 1 + yield res + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal + i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + + total += 1 + yield res + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + + if byhour: + ndays, hour = self.__mod_distance(value=hour, + byxxx=self._byhour, + base=24) + else: + ndays, hour = divmod(hour+interval, 24) + + if ndays: + day += ndays + fixday = True + + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + + valid = False + rep_rate = (24*60) + for j in range(rep_rate // gcd(interval, rep_rate)): + if byminute: + nhours, minute = \ + self.__mod_distance(value=minute, + byxxx=self._byminute, + base=60) + else: + nhours, minute = divmod(minute+interval, 60) + + div, hour = divmod(hour+nhours, 24) + if div: + day += div + fixday = True + 
filtered = False + + if not byhour or hour in byhour: + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval and ' + + 'byhour resulting in empty rule.') + + timeset = gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399 - (hour * 3600 + minute * 60 + second)) + // interval) * interval) + + rep_rate = (24 * 3600) + valid = False + for j in range(0, rep_rate // gcd(interval, rep_rate)): + if bysecond: + nminutes, second = \ + self.__mod_distance(value=second, + byxxx=self._bysecond, + base=60) + else: + nminutes, second = divmod(second+interval, 60) + + div, minute = divmod(minute+nminutes, 60) + if div: + hour += div + div, hour = divmod(hour, 24) + if div: + day += div + fixday = True + + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval, ' + + 'byhour and byminute resulting in empty' + + ' rule.') + + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + + def __construct_byset(self, start, byxxx, base): + """ + If a `BYXXX` sequence is passed to the constructor at the same level as + `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some + specifications which cannot be reached given some starting conditions. 
+ + This occurs whenever the interval is not coprime with the base of a + given unit and the difference between the starting position and the + ending position is not coprime with the greatest common denominator + between the interval and the base. For example, with a FREQ of hourly + starting at 17:00 and an interval of 4, the only valid values for + BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not + coprime. + + :param start: + Specifies the starting position. + :param byxxx: + An iterable containing the list of allowed values. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + This does not preserve the type of the iterable, returning a set, since + the values should be unique and the order is irrelevant, this will + speed up later lookups. + + In the event of an empty set, raises a :exception:`ValueError`, as this + results in an empty rrule. + """ + + cset = set() + + # Support a single byxxx value. + if isinstance(byxxx, integer_types): + byxxx = (byxxx, ) + + for num in byxxx: + i_gcd = gcd(self._interval, base) + # Use divmod rather than % because we need to wrap negative nums. + if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: + cset.add(num) + + if len(cset) == 0: + raise ValueError("Invalid rrule byxxx generates an empty set.") + + return cset + + def __mod_distance(self, value, byxxx, base): + """ + Calculates the next value in a sequence where the `FREQ` parameter is + specified along with a `BYXXX` parameter at the same "level" + (e.g. `HOURLY` specified with `BYHOUR`). + + :param value: + The old value of the component. + :param byxxx: + The `BYXXX` set, which should have been generated by + `rrule._construct_byset`, or something else which checks that a + valid rule is present. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). 
+ + If a valid value is not found after `base` iterations (the maximum + number before the sequence would start to repeat), this raises a + :exception:`ValueError`, as no valid values were found. + + This returns a tuple of `divmod(n*interval, base)`, where `n` is the + smallest number of `interval` repetitions until the next specified + value in `byxxx` is found. + """ + accumulator = 0 + for ii in range(1, base + 1): + # Using divmod() over % to account for negative intervals + div, value = divmod(value + self._interval, base) + accumulator += div + if value in byxxx: + return (accumulator, value) + + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, month): + # Every mask is 7 days longer to handle cross-year weekly periods. 
+ rr = self.rrule + if year != self.lastyear: + self.yearlen = 365 + calendar.isleap(year) + self.nextyearlen = 365 + calendar.isleap(year + 1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. 
If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. + if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1, 1, 1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst) % 7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen + + (lyearweekday-rr._wkst) % 7) % 7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and (month != self.lastmonth or + year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday) % 7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday) % 7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return list(range(self.yearlen)), 0, self.yearlen + + def mdayset(self, year, month, day): + dset = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + dset[i] = i + return dset, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. 
+ dset = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + dset[i] = i + i += 1 + # if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return dset, start, i + + def ddayset(self, year, month, day): + dset = [None] * self.yearlen + i = datetime.date(year, month, day).toordinal() - self.yearordinal + dset[i] = i + return dset, i, i + 1 + + def htimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def mtimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + """ The rruleset type allows more complex recurrence setups, mixing + multiple rules, dates, exclusion rules, and exclusion dates. The type + constructor takes the following keyword arguments: + + :param cache: If True, caching of results will be enabled, improving + performance of multiple queries considerably. 
""" + + class _genitem(object): + def __init__(self, genlist, gen): + try: + self.dt = advance_iterator(gen) + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def __next__(self): + try: + self.dt = advance_iterator(self.gen) + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + next = __next__ + + def __lt__(self, other): + return self.dt < other.dt + + def __gt__(self, other): + return self.dt > other.dt + + def __eq__(self, other): + return self.dt == other.dt + + def __ne__(self, other): + return self.dt != other.dt + + def __init__(self, cache=False): + super(rruleset, self).__init__(cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + @_invalidates_cache + def rrule(self, rrule): + """ Include the given :py:class:`rrule` instance in the recurrence set + generation. """ + self._rrule.append(rrule) + + @_invalidates_cache + def rdate(self, rdate): + """ Include the given :py:class:`datetime` instance in the recurrence + set generation. """ + self._rdate.append(rdate) + + @_invalidates_cache + def exrule(self, exrule): + """ Include the given rrule instance in the recurrence set exclusion + list. Dates which are part of the given recurrence rules will not + be generated, even if some inclusive rrule or rdate matches them. + """ + self._exrule.append(exrule) + + @_invalidates_cache + def exdate(self, exdate): + """ Include the given datetime instance in the recurrence set + exclusion list. Dates included that way will not be generated, + even if some inclusive rrule or rdate matches them. 
""" + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate)) + for gen in [iter(x) for x in self._rrule]: + self._genitem(rlist, gen) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate)) + for gen in [iter(x) for x in self._exrule]: + self._genitem(exlist, gen) + lastdt = None + total = 0 + heapq.heapify(rlist) + heapq.heapify(exlist) + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + advance_iterator(exitem) + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + advance_iterator(ritem) + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + + + + +class _rrulestr(object): + """ Parses a string representation of a recurrence rule or set of + recurrence rules. + + :param s: + Required, a string defining one or more recurrence rules. + + :param dtstart: + If given, used as the default recurrence start if not specified in the + rule string. + + :param cache: + If set ``True`` caching of results will be enabled, improving + performance of multiple queries considerably. + + :param unfold: + If set ``True`` indicates that a rule string is split over more + than one line and should be joined before processing. + + :param forceset: + If set ``True`` forces a :class:`dateutil.rrule.rruleset` to + be returned. + + :param compatible: + If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime.datetime` object is returned. + + :param tzids: + If given, a callable or mapping used to retrieve a + :class:`datetime.tzinfo` from a string representation. + Defaults to :func:`dateutil.tz.gettz`. 
+ + :param tzinfos: + Additional time zone names / aliases which may be present in a string + representation. See :func:`dateutil.parser.parse` for more + information. + + :return: + Returns a :class:`dateutil.rrule.rruleset` or + :class:`dateutil.rrule.rrule` + """ + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, + "FR": 4, "SA": 5, "SU": 6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError("invalid until date") + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): + """ + Two ways to specify this: +1MO or MO(+1) + """ + l = [] + for wday in value.split(','): + if '(' in wday: + # If it's of the form TH(+1), etc. 
+ splt = wday.split('(') + w = splt[0] + n = int(splt[1][:-1]) + elif len(wday): + # If it's of the form +1MO + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: + n = int(n) + else: + raise ValueError("Invalid (empty) BYDAY specification.") + + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError("unknown parameter name") + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError("unknown parameter '%s'" % name) + except (KeyError, ValueError): + raise ValueError("invalid '%s': %s" % (name, value)) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_date_value(self, date_value, parms, rule_tzids, + ignoretz, tzids, tzinfos): + global parser + if not parser: + from dateutil import parser + + datevals = [] + value_found = False + TZID = None + + for parm in parms: + if parm.startswith("TZID="): + try: + tzkey = rule_tzids[parm.split('TZID=')[-1]] + except KeyError: + continue + if tzids is None: + from . import tz + tzlookup = tz.gettz + elif callable(tzids): + tzlookup = tzids + else: + tzlookup = getattr(tzids, 'get', None) + if tzlookup is None: + msg = ('tzids must be a callable, mapping, or None, ' + 'not %s' % tzids) + raise ValueError(msg) + + TZID = tzlookup(tzkey) + continue + + # RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found + # only once. 
+ if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}: + raise ValueError("unsupported parm: " + parm) + else: + if value_found: + msg = ("Duplicate value parameter found in: " + parm) + raise ValueError(msg) + value_found = True + + for datestr in date_value.split(','): + date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos) + if TZID is not None: + if date.tzinfo is None: + date = date.replace(tzinfo=TZID) + else: + raise ValueError('DTSTART/EXDATE specifies multiple timezone') + datevals.append(date) + + return datevals + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzids=None, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + + TZID_NAMES = dict(map( + lambda x: (x.upper(), x), + re.findall('TZID=(?P[^:]+):', s) + )) + s = s.upper() + if not s.strip(): + raise ValueError("empty string") + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and (s.find(':') == -1 or + s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError("unsupported RRULE parm: "+parm) + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + 
rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError("unsupported EXRULE parm: "+parm) + exrulevals.append(value) + elif name == "EXDATE": + exdatevals.extend( + self._parse_date_value(value, parms, + TZID_NAMES, ignoretz, + tzids, tzinfos) + ) + elif name == "DTSTART": + dtvals = self._parse_date_value(value, parms, TZID_NAMES, + ignoretz, tzids, tzinfos) + if len(dtvals) != 1: + raise ValueError("Multiple DTSTART values specified:" + + value) + dtstart = dtvals[0] + else: + raise ValueError("unsupported property: "+name) + if (forceset or len(rrulevals) > 1 or rdatevals + or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + rset = rruleset(cache=cache) + for value in rrulevals: + rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + rset.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + rset.exdate(value) + if compatible and dtstart: + rset.rdate(dtstart) + return rset + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/third_party/python/python_dateutil/dateutil/tz/__init__.py b/third_party/python/python_dateutil/dateutil/tz/__init__.py new file mode 100644 index 0000000000000..af1352c47292f --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/tz/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +from .tz import * +from .tz import __doc__ + +__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", + "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz", + "enfold", 
"datetime_ambiguous", "datetime_exists", + "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"] + + +class DeprecatedTzFormatWarning(Warning): + """Warning raised when time zones are parsed from deprecated formats.""" diff --git a/third_party/python/python_dateutil/dateutil/tz/_common.py b/third_party/python/python_dateutil/dateutil/tz/_common.py new file mode 100644 index 0000000000000..e6ac11831522b --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/tz/_common.py @@ -0,0 +1,419 @@ +from six import PY2 + +from functools import wraps + +from datetime import datetime, timedelta, tzinfo + + +ZERO = timedelta(0) + +__all__ = ['tzname_in_python2', 'enfold'] + + +def tzname_in_python2(namefunc): + """Change unicode output into bytestrings in Python 2 + + tzname() API changed in Python 3. It used to return bytes, but was changed + to unicode strings + """ + if PY2: + @wraps(namefunc) + def adjust_encoding(*args, **kwargs): + name = namefunc(*args, **kwargs) + if name is not None: + name = name.encode() + + return name + + return adjust_encoding + else: + return namefunc + + +# The following is adapted from Alexander Belopolsky's tz library +# https://github.com/abalkin/tz +if hasattr(datetime, 'fold'): + # This is the pre-python 3.6 fold situation + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. 
versionadded:: 2.6.0 + """ + return dt.replace(fold=fold) + +else: + class _DatetimeWithFold(datetime): + """ + This is a class designed to provide a PEP 495-compliant interface for + Python versions before 3.6. It is used only for dates in a fold, so + the ``fold`` attribute is fixed at ``1``. + + .. versionadded:: 2.6.0 + """ + __slots__ = () + + def replace(self, *args, **kwargs): + """ + Return a datetime with the same attributes, except for those + attributes given new values by whichever keyword arguments are + specified. Note that tzinfo=None can be specified to create a naive + datetime from an aware datetime with no conversion of date and time + data. + + This is reimplemented in ``_DatetimeWithFold`` because pypy3 will + return a ``datetime.datetime`` even if ``fold`` is unchanged. + """ + argnames = ( + 'year', 'month', 'day', 'hour', 'minute', 'second', + 'microsecond', 'tzinfo' + ) + + for arg, argname in zip(args, argnames): + if argname in kwargs: + raise TypeError('Duplicate argument: {}'.format(argname)) + + kwargs[argname] = arg + + for argname in argnames: + if argname not in kwargs: + kwargs[argname] = getattr(self, argname) + + dt_class = self.__class__ if kwargs.get('fold', 1) else datetime + + return dt_class(**kwargs) + + @property + def fold(self): + return 1 + + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. 
versionadded:: 2.6.0 + """ + if getattr(dt, 'fold', 0) == fold: + return dt + + args = dt.timetuple()[:6] + args += (dt.microsecond, dt.tzinfo) + + if fold: + return _DatetimeWithFold(*args) + else: + return datetime(*args) + + +def _validate_fromutc_inputs(f): + """ + The CPython version of ``fromutc`` checks that the input is a ``datetime`` + object and that ``self`` is attached as its ``tzinfo``. + """ + @wraps(f) + def fromutc(self, dt): + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + return f(self, dt) + + return fromutc + + +class _tzinfo(tzinfo): + """ + Base class for all ``dateutil`` ``tzinfo`` objects. + """ + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + + dt = dt.replace(tzinfo=self) + + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) + + return same_dt and not same_offset + + def _fold_status(self, dt_utc, dt_wall): + """ + Determine the fold status of a "wall" datetime, given a representation + of the same datetime as a (naive) UTC datetime. This is calculated based + on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all + datetimes, and that this offset is the actual number of hours separating + ``dt_utc`` and ``dt_wall``. + + :param dt_utc: + Representation of the datetime as UTC + + :param dt_wall: + Representation of the datetime as "wall time". This parameter must + either have a `fold` attribute or have a fold-naive + :class:`datetime.tzinfo` attached, otherwise the calculation may + fail. 
+ """ + if self.is_ambiguous(dt_wall): + delta_wall = dt_wall - dt_utc + _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) + else: + _fold = 0 + + return _fold + + def _fold(self, dt): + return getattr(dt, 'fold', 0) + + def _fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. + + Since this is the one time that we *know* we have an unambiguous + datetime object, we take this opportunity to determine whether the + datetime is ambiguous and in a "fold" state (e.g. if it's the first + occurrence, chronologically, of the ambiguous datetime). + + :param dt: + A timezone-aware :class:`datetime.datetime` object. + """ + + # Re-implement the algorithm from Python's datetime.py + dtoff = dt.utcoffset() + if dtoff is None: + raise ValueError("fromutc() requires a non-None utcoffset() " + "result") + + # The original datetime.py code assumes that `dst()` defaults to + # zero during ambiguous times. PEP 495 inverts this presumption, so + # for pre-PEP 495 versions of python, we need to tweak the algorithm. + dtdst = dt.dst() + if dtdst is None: + raise ValueError("fromutc() requires a non-None dst() result") + delta = dtoff - dtdst + + dt += delta + # Set fold=1 so we can default to being in the fold for + # ambiguous dates. + dtdst = enfold(dt, fold=1).dst() + if dtdst is None: + raise ValueError("fromutc(): dt.dst gave inconsistent " + "results; cannot convert") + return dt + dtdst + + @_validate_fromutc_inputs + def fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. + + Since this is the one time that we *know* we have an unambiguous + datetime object, we take this opportunity to determine whether the + datetime is ambiguous and in a "fold" state (e.g. if it's the first + occurrence, chronologically, of the ambiguous datetime). 
+ + :param dt: + A timezone-aware :class:`datetime.datetime` object. + """ + dt_wall = self._fromutc(dt) + + # Calculate the fold status given the two datetimes. + _fold = self._fold_status(dt, dt_wall) + + # Set the default fold value for ambiguous dates + return enfold(dt_wall, fold=_fold) + + +class tzrangebase(_tzinfo): + """ + This is an abstract base class for time zones represented by an annual + transition into and out of DST. Child classes should implement the following + methods: + + * ``__init__(self, *args, **kwargs)`` + * ``transitions(self, year)`` - this is expected to return a tuple of + datetimes representing the DST on and off transitions in standard + time. + + A fully initialized ``tzrangebase`` subclass should also provide the + following attributes: + * ``hasdst``: Boolean whether or not the zone uses DST. + * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects + representing the respective UTC offsets. + * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short + abbreviations in DST and STD, respectively. + * ``_hasdst``: Whether or not the zone has DST. + + .. 
versionadded:: 2.6.0 + """ + def __init__(self): + raise NotImplementedError('tzrangebase is an abstract base class') + + def utcoffset(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_base_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + if self._isdst(dt): + return self._dst_abbr + else: + return self._std_abbr + + def fromutc(self, dt): + """ Given a datetime in UTC, return local time """ + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # Get transitions - if there are none, fixed offset + transitions = self.transitions(dt.year) + if transitions is None: + return dt + self.utcoffset(dt) + + # Get the transition times in UTC + dston, dstoff = transitions + + dston -= self._std_offset + dstoff -= self._std_offset + + utc_transitions = (dston, dstoff) + dt_utc = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt_utc, utc_transitions) + + if isdst: + dt_wall = dt + self._dst_offset + else: + dt_wall = dt + self._std_offset + + _fold = int(not isdst and self.is_ambiguous(dt_wall)) + + return enfold(dt_wall, fold=_fold) + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. 
versionadded:: 2.6.0 + """ + if not self.hasdst: + return False + + start, end = self.transitions(dt.year) + + dt = dt.replace(tzinfo=None) + return (end <= dt < end + self._dst_base_offset) + + def _isdst(self, dt): + if not self.hasdst: + return False + elif dt is None: + return None + + transitions = self.transitions(dt.year) + + if transitions is None: + return False + + dt = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt, transitions) + + # Handle ambiguous dates + if not isdst and self.is_ambiguous(dt): + return not self._fold(dt) + else: + return isdst + + def _naive_isdst(self, dt, transitions): + dston, dstoff = transitions + + dt = dt.replace(tzinfo=None) + + if dston < dstoff: + isdst = dston <= dt < dstoff + else: + isdst = not dstoff <= dt < dston + + return isdst + + @property + def _dst_base_offset(self): + return self._dst_offset - self._std_offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(...)" % self.__class__.__name__ + + __reduce__ = object.__reduce__ diff --git a/third_party/python/python_dateutil/dateutil/tz/_factories.py b/third_party/python/python_dateutil/dateutil/tz/_factories.py new file mode 100644 index 0000000000000..f8a65891a023e --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/tz/_factories.py @@ -0,0 +1,80 @@ +from datetime import timedelta +import weakref +from collections import OrderedDict + +from six.moves import _thread + + +class _TzSingleton(type): + def __init__(cls, *args, **kwargs): + cls.__instance = None + super(_TzSingleton, cls).__init__(*args, **kwargs) + + def __call__(cls): + if cls.__instance is None: + cls.__instance = super(_TzSingleton, cls).__call__() + return cls.__instance + + +class _TzFactory(type): + def instance(cls, *args, **kwargs): + """Alternate constructor that returns a fresh instance""" + return type.__call__(cls, *args, **kwargs) + + +class _TzOffsetFactory(_TzFactory): + def __init__(cls, *args, 
**kwargs): + cls.__instances = weakref.WeakValueDictionary() + cls.__strong_cache = OrderedDict() + cls.__strong_cache_size = 8 + + cls._cache_lock = _thread.allocate_lock() + + def __call__(cls, name, offset): + if isinstance(offset, timedelta): + key = (name, offset.total_seconds()) + else: + key = (name, offset) + + instance = cls.__instances.get(key, None) + if instance is None: + instance = cls.__instances.setdefault(key, + cls.instance(name, offset)) + + # This lock may not be necessary in Python 3. See GH issue #901 + with cls._cache_lock: + cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) + + # Remove an item if the strong cache is overpopulated + if len(cls.__strong_cache) > cls.__strong_cache_size: + cls.__strong_cache.popitem(last=False) + + return instance + + +class _TzStrFactory(_TzFactory): + def __init__(cls, *args, **kwargs): + cls.__instances = weakref.WeakValueDictionary() + cls.__strong_cache = OrderedDict() + cls.__strong_cache_size = 8 + + cls.__cache_lock = _thread.allocate_lock() + + def __call__(cls, s, posix_offset=False): + key = (s, posix_offset) + instance = cls.__instances.get(key, None) + + if instance is None: + instance = cls.__instances.setdefault(key, + cls.instance(s, posix_offset)) + + # This lock may not be necessary in Python 3. See GH issue #901 + with cls.__cache_lock: + cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) + + # Remove an item if the strong cache is overpopulated + if len(cls.__strong_cache) > cls.__strong_cache_size: + cls.__strong_cache.popitem(last=False) + + return instance + diff --git a/third_party/python/python_dateutil/dateutil/tz/tz.py b/third_party/python/python_dateutil/dateutil/tz/tz.py new file mode 100644 index 0000000000000..c67f56d4659f1 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/tz/tz.py @@ -0,0 +1,1849 @@ +# -*- coding: utf-8 -*- +""" +This module offers timezone implementations subclassing the abstract +:py:class:`datetime.tzinfo` type. 
There are classes to handle tzfile format +files (usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, +etc), TZ environment string (in all known formats), given ranges (with help +from relative deltas), local machine timezone, fixed offset timezone, and UTC +timezone. +""" +import datetime +import struct +import time +import sys +import os +import bisect +import weakref +from collections import OrderedDict + +import six +from six import string_types +from six.moves import _thread +from ._common import tzname_in_python2, _tzinfo +from ._common import tzrangebase, enfold +from ._common import _validate_fromutc_inputs + +from ._factories import _TzSingleton, _TzOffsetFactory +from ._factories import _TzStrFactory +try: + from .win import tzwin, tzwinlocal +except ImportError: + tzwin = tzwinlocal = None + +# For warning about rounding tzinfo +from warnings import warn + +ZERO = datetime.timedelta(0) +EPOCH = datetime.datetime.utcfromtimestamp(0) +EPOCHORDINAL = EPOCH.toordinal() + + +@six.add_metaclass(_TzSingleton) +class tzutc(datetime.tzinfo): + """ + This is a tzinfo object that represents the UTC time zone. + + **Examples:** + + .. doctest:: + + >>> from datetime import * + >>> from dateutil.tz import * + + >>> datetime.now() + datetime.datetime(2003, 9, 27, 9, 40, 1, 521290) + + >>> datetime.now(tzutc()) + datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc()) + + >>> datetime.now(tzutc()).tzname() + 'UTC' + + .. versionchanged:: 2.7.0 + ``tzutc()`` is now a singleton, so the result of ``tzutc()`` will + always return the same object. + + .. doctest:: + + >>> from dateutil.tz import tzutc, UTC + >>> tzutc() is tzutc() + True + >>> tzutc() is UTC + True + """ + def utcoffset(self, dt): + return ZERO + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return "UTC" + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. 
+ + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + return False + + @_validate_fromutc_inputs + def fromutc(self, dt): + """ + Fast track version of fromutc() returns the original ``dt`` object for + any valid :py:class:`datetime.datetime` object. + """ + return dt + + def __eq__(self, other): + if not isinstance(other, (tzutc, tzoffset)): + return NotImplemented + + return (isinstance(other, tzutc) or + (isinstance(other, tzoffset) and other._offset == ZERO)) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +#: Convenience constant providing a :class:`tzutc()` instance +#: +#: .. versionadded:: 2.7.0 +UTC = tzutc() + + +@six.add_metaclass(_TzOffsetFactory) +class tzoffset(datetime.tzinfo): + """ + A simple class for representing a fixed offset from UTC. + + :param name: + The timezone name, to be returned when ``tzname()`` is called. + :param offset: + The time zone offset in seconds, or (since version 2.6.0, represented + as a :py:class:`datetime.timedelta` object). + """ + def __init__(self, name, offset): + self._name = name + + try: + # Allow a timedelta + offset = offset.total_seconds() + except (TypeError, AttributeError): + pass + + self._offset = datetime.timedelta(seconds=_get_supported_offset(offset)) + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._name + + @_validate_fromutc_inputs + def fromutc(self, dt): + return dt + self._offset + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. 
+ + .. versionadded:: 2.6.0 + """ + return False + + def __eq__(self, other): + if not isinstance(other, tzoffset): + return NotImplemented + + return self._offset == other._offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, + repr(self._name), + int(self._offset.total_seconds())) + + __reduce__ = object.__reduce__ + + +class tzlocal(_tzinfo): + """ + A :class:`tzinfo` subclass built around the ``time`` timezone functions. + """ + def __init__(self): + super(tzlocal, self).__init__() + + self._std_offset = datetime.timedelta(seconds=-time.timezone) + if time.daylight: + self._dst_offset = datetime.timedelta(seconds=-time.altzone) + else: + self._dst_offset = self._std_offset + + self._dst_saved = self._dst_offset - self._std_offset + self._hasdst = bool(self._dst_saved) + self._tznames = tuple(time.tzname) + + def utcoffset(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset - self._std_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._tznames[self._isdst(dt)] + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + naive_dst = self._naive_is_dst(dt) + return (not naive_dst and + (naive_dst != self._naive_is_dst(dt - self._dst_saved))) + + def _naive_is_dst(self, dt): + timestamp = _datetime_to_timestamp(dt) + return time.localtime(timestamp + time.timezone).tm_isdst + + def _isdst(self, dt, fold_naive=True): + # We can't use mktime here. 
It is unstable when deciding if + # the hour near to a change is DST or not. + # + # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, + # dt.minute, dt.second, dt.weekday(), 0, -1)) + # return time.localtime(timestamp).tm_isdst + # + # The code above yields the following result: + # + # >>> import tz, datetime + # >>> t = tz.tzlocal() + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # + # Here is a more stable implementation: + # + if not self._hasdst: + return False + + # Check for ambiguous times: + dstval = self._naive_is_dst(dt) + fold = getattr(dt, 'fold', None) + + if self.is_ambiguous(dt): + if fold is not None: + return not self._fold(dt) + else: + return True + + return dstval + + def __eq__(self, other): + if isinstance(other, tzlocal): + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset) + elif isinstance(other, tzutc): + return (not self._hasdst and + self._tznames[0] in {'UTC', 'GMT'} and + self._std_offset == ZERO) + elif isinstance(other, tzoffset): + return (not self._hasdst and + self._tznames[0] == other._name and + self._std_offset == other._offset) + else: + return NotImplemented + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class _ttinfo(object): + __slots__ = ["offset", "delta", "isdst", "abbr", + "isstd", "isgmt", "dstoffset"] + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def __repr__(self): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + 
return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + + def __eq__(self, other): + if not isinstance(other, _ttinfo): + return NotImplemented + + return (self.offset == other.offset and + self.delta == other.delta and + self.isdst == other.isdst and + self.abbr == other.abbr and + self.isstd == other.isstd and + self.isgmt == other.isgmt and + self.dstoffset == other.dstoffset) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __getstate__(self): + state = {} + for name in self.__slots__: + state[name] = getattr(self, name, None) + return state + + def __setstate__(self, state): + for name in self.__slots__: + if name in state: + setattr(self, name, state[name]) + + +class _tzfile(object): + """ + Lightweight class for holding the relevant transition and time zone + information read from binary tzfiles. + """ + attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list', + 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first'] + + def __init__(self, **kwargs): + for attr in self.attrs: + setattr(self, attr, kwargs.get(attr, None)) + + +class tzfile(_tzinfo): + """ + This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)`` + format timezone files to extract current and historical zone information. + + :param fileobj: + This can be an opened file stream or a file name that the time zone + information can be read from. + + :param filename: + This is an optional parameter specifying the source of the time zone + information in the event that ``fileobj`` is a file object. If omitted + and ``fileobj`` is a file stream, this parameter will be set either to + ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``. + + See `Sources for Time Zone and Daylight Saving Time Data + `_ for more information. + Time zone files can be compiled from the `IANA Time Zone database files + `_ with the `zic time zone compiler + `_ + + .. 
note:: + + Only construct a ``tzfile`` directly if you have a specific timezone + file on disk that you want to read into a Python ``tzinfo`` object. + If you want to get a ``tzfile`` representing a specific IANA zone, + (e.g. ``'America/New_York'``), you should call + :func:`dateutil.tz.gettz` with the zone identifier. + + + **Examples:** + + Using the US Eastern time zone as an example, we can see that a ``tzfile`` + provides time zone information for the standard Daylight Saving offsets: + + .. testsetup:: tzfile + + from dateutil.tz import gettz + from datetime import datetime + + .. doctest:: tzfile + + >>> NYC = gettz('America/New_York') + >>> NYC + tzfile('/usr/share/zoneinfo/America/New_York') + + >>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST + 2016-01-03 00:00:00-05:00 + + >>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT + 2016-07-07 00:00:00-04:00 + + + The ``tzfile`` structure contains a fully history of the time zone, + so historical dates will also have the right offsets. For example, before + the adoption of the UTC standards, New York used local solar mean time: + + .. doctest:: tzfile + + >>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT + 1901-04-12 00:00:00-04:56 + + And during World War II, New York was on "Eastern War Time", which was a + state of permanent daylight saving time: + + .. 
doctest:: tzfile + + >>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT + 1944-02-07 00:00:00-04:00 + + """ + + def __init__(self, fileobj, filename=None): + super(tzfile, self).__init__() + + file_opened_here = False + if isinstance(fileobj, string_types): + self._filename = fileobj + fileobj = open(fileobj, 'rb') + file_opened_here = True + elif filename is not None: + self._filename = filename + elif hasattr(fileobj, "name"): + self._filename = fileobj.name + else: + self._filename = repr(fileobj) + + if fileobj is not None: + if not file_opened_here: + fileobj = _nullcontext(fileobj) + + with fileobj as file_stream: + tzobj = self._read_tzfile(file_stream) + + self._set_tzdata(tzobj) + + def _set_tzdata(self, tzobj): + """ Set the time zone data of this object from a _tzfile object """ + # Copy the relevant attributes over as private attributes + for attr in _tzfile.attrs: + setattr(self, '_' + attr, getattr(tzobj, attr)) + + def _read_tzfile(self, fileobj): + out = _tzfile() + + # From tzfile(5): + # + # The time zone information files used by tzset(3) + # begin with the magic characters "TZif" to identify + # them as time zone information files, followed by + # sixteen bytes reserved for future use, followed by + # six four-byte values of type long, written in a + # ``standard'' byte order (the high-order byte + # of the value is written first). + if fileobj.read(4).decode() != "TZif": + raise ValueError("magic not found") + + fileobj.read(16) + + ( + # The number of UTC/local indicators stored in the file. + ttisgmtcnt, + + # The number of standard/wall indicators stored in the file. + ttisstdcnt, + + # The number of leap seconds for which data is + # stored in the file. + leapcnt, + + # The number of "transition times" for which data + # is stored in the file. + timecnt, + + # The number of "local time types" for which data + # is stored in the file (must not be zero). 
+ typecnt, + + # The number of characters of "time zone + # abbreviation strings" stored in the file. + charcnt, + + ) = struct.unpack(">6l", fileobj.read(24)) + + # The above header is followed by tzh_timecnt four-byte + # values of type long, sorted in ascending order. + # These values are written in ``standard'' byte order. + # Each is used as a transition time (as returned by + # time(2)) at which the rules for computing local time + # change. + + if timecnt: + out.trans_list_utc = list(struct.unpack(">%dl" % timecnt, + fileobj.read(timecnt*4))) + else: + out.trans_list_utc = [] + + # Next come tzh_timecnt one-byte values of type unsigned + # char; each one tells which of the different types of + # ``local time'' types described in the file is associated + # with the same-indexed transition time. These values + # serve as indices into an array of ttinfo structures that + # appears next in the file. + + if timecnt: + out.trans_idx = struct.unpack(">%dB" % timecnt, + fileobj.read(timecnt)) + else: + out.trans_idx = [] + + # Each ttinfo structure is written as a four-byte value + # for tt_gmtoff of type long, in a standard byte + # order, followed by a one-byte value for tt_isdst + # and a one-byte value for tt_abbrind. In each + # structure, tt_gmtoff gives the number of + # seconds to be added to UTC, tt_isdst tells whether + # tm_isdst should be set by localtime(3), and + # tt_abbrind serves as an index into the array of + # time zone abbreviation characters that follow the + # ttinfo structure(s) in the file. + + ttinfo = [] + + for i in range(typecnt): + ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) + + abbr = fileobj.read(charcnt).decode() + + # Then there are tzh_leapcnt pairs of four-byte + # values, written in standard byte order; the + # first value of each pair gives the time (as + # returned by time(2)) at which a leap second + # occurs; the second gives the total number of + # leap seconds to be applied after the given time. 
+ # The pairs of values are sorted in ascending order + # by time. + + # Not used, for now (but seek for correct file position) + if leapcnt: + fileobj.seek(leapcnt * 8, os.SEEK_CUR) + + # Then there are tzh_ttisstdcnt standard/wall + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as standard + # time or wall clock time, and are used when + # a time zone file is used in handling POSIX-style + # time zone environment variables. + + if ttisstdcnt: + isstd = struct.unpack(">%db" % ttisstdcnt, + fileobj.read(ttisstdcnt)) + + # Finally, there are tzh_ttisgmtcnt UTC/local + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as UTC or + # local time, and are used when a time zone file + # is used in handling POSIX-style time zone envi- + # ronment variables. + + if ttisgmtcnt: + isgmt = struct.unpack(">%db" % ttisgmtcnt, + fileobj.read(ttisgmtcnt)) + + # Build ttinfo list + out.ttinfo_list = [] + for i in range(typecnt): + gmtoff, isdst, abbrind = ttinfo[i] + gmtoff = _get_supported_offset(gmtoff) + tti = _ttinfo() + tti.offset = gmtoff + tti.dstoffset = datetime.timedelta(0) + tti.delta = datetime.timedelta(seconds=gmtoff) + tti.isdst = isdst + tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] + tti.isstd = (ttisstdcnt > i and isstd[i] != 0) + tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) + out.ttinfo_list.append(tti) + + # Replace ttinfo indexes for ttinfo objects. + out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx] + + # Set standard, dst, and before ttinfos. before will be + # used when a given time is before any transitions, + # and will be set to the first non-dst ttinfo, or to + # the first dst, if all of them are dst. 
+ out.ttinfo_std = None + out.ttinfo_dst = None + out.ttinfo_before = None + if out.ttinfo_list: + if not out.trans_list_utc: + out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0] + else: + for i in range(timecnt-1, -1, -1): + tti = out.trans_idx[i] + if not out.ttinfo_std and not tti.isdst: + out.ttinfo_std = tti + elif not out.ttinfo_dst and tti.isdst: + out.ttinfo_dst = tti + + if out.ttinfo_std and out.ttinfo_dst: + break + else: + if out.ttinfo_dst and not out.ttinfo_std: + out.ttinfo_std = out.ttinfo_dst + + for tti in out.ttinfo_list: + if not tti.isdst: + out.ttinfo_before = tti + break + else: + out.ttinfo_before = out.ttinfo_list[0] + + # Now fix transition times to become relative to wall time. + # + # I'm not sure about this. In my tests, the tz source file + # is setup to wall time, and in the binary file isstd and + # isgmt are off, so it should be in wall time. OTOH, it's + # always in gmt time. Let me know if you have comments + # about this. + lastdst = None + lastoffset = None + lastdstoffset = None + lastbaseoffset = None + out.trans_list = [] + + for i, tti in enumerate(out.trans_idx): + offset = tti.offset + dstoffset = 0 + + if lastdst is not None: + if tti.isdst: + if not lastdst: + dstoffset = offset - lastoffset + + if not dstoffset and lastdstoffset: + dstoffset = lastdstoffset + + tti.dstoffset = datetime.timedelta(seconds=dstoffset) + lastdstoffset = dstoffset + + # If a time zone changes its base offset during a DST transition, + # then you need to adjust by the previous base offset to get the + # transition time in local time. Otherwise you use the current + # base offset. Ideally, I would have some mathematical proof of + # why this is true, but I haven't really thought about it enough. 
+ baseoffset = offset - dstoffset + adjustment = baseoffset + if (lastbaseoffset is not None and baseoffset != lastbaseoffset + and tti.isdst != lastdst): + # The base DST has changed + adjustment = lastbaseoffset + + lastdst = tti.isdst + lastoffset = offset + lastbaseoffset = baseoffset + + out.trans_list.append(out.trans_list_utc[i] + adjustment) + + out.trans_idx = tuple(out.trans_idx) + out.trans_list = tuple(out.trans_list) + out.trans_list_utc = tuple(out.trans_list_utc) + + return out + + def _find_last_transition(self, dt, in_utc=False): + # If there's no list, there are no transitions to find + if not self._trans_list: + return None + + timestamp = _datetime_to_timestamp(dt) + + # Find where the timestamp fits in the transition list - if the + # timestamp is a transition time, it's part of the "after" period. + trans_list = self._trans_list_utc if in_utc else self._trans_list + idx = bisect.bisect_right(trans_list, timestamp) + + # We want to know when the previous transition was, so subtract off 1 + return idx - 1 + + def _get_ttinfo(self, idx): + # For no list or after the last transition, default to _ttinfo_std + if idx is None or (idx + 1) >= len(self._trans_list): + return self._ttinfo_std + + # If there is a list and the time is before it, return _ttinfo_before + if idx < 0: + return self._ttinfo_before + + return self._trans_idx[idx] + + def _find_ttinfo(self, dt): + idx = self._resolve_ambiguous_time(dt) + + return self._get_ttinfo(idx) + + def fromutc(self, dt): + """ + The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`. + + :param dt: + A :py:class:`datetime.datetime` object. + + :raises TypeError: + Raised if ``dt`` is not a :py:class:`datetime.datetime` object. + + :raises ValueError: + Raised if this is called with a ``dt`` which does not have this + ``tzinfo`` attached. + + :return: + Returns a :py:class:`datetime.datetime` object representing the + wall time in ``self``'s time zone. 
+ """ + # These isinstance checks are in datetime.tzinfo, so we'll preserve + # them, even if we don't care about duck typing. + if not isinstance(dt, datetime.datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # First treat UTC as wall time and get the transition we're in. + idx = self._find_last_transition(dt, in_utc=True) + tti = self._get_ttinfo(idx) + + dt_out = dt + datetime.timedelta(seconds=tti.offset) + + fold = self.is_ambiguous(dt_out, idx=idx) + + return enfold(dt_out, fold=int(fold)) + + def is_ambiguous(self, dt, idx=None): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if idx is None: + idx = self._find_last_transition(dt) + + # Calculate the difference in offsets from current to previous + timestamp = _datetime_to_timestamp(dt) + tti = self._get_ttinfo(idx) + + if idx is None or idx <= 0: + return False + + od = self._get_ttinfo(idx - 1).offset - tti.offset + tt = self._trans_list[idx] # Transition time + + return timestamp < tt + od + + def _resolve_ambiguous_time(self, dt): + idx = self._find_last_transition(dt) + + # If we have no transitions, return the index + _fold = self._fold(dt) + if idx is None or idx == 0: + return idx + + # If it's ambiguous and we're in a fold, shift to a different index. 
+ idx_offset = int(not _fold and self.is_ambiguous(dt, idx)) + + return idx - idx_offset + + def utcoffset(self, dt): + if dt is None: + return None + + if not self._ttinfo_std: + return ZERO + + return self._find_ttinfo(dt).delta + + def dst(self, dt): + if dt is None: + return None + + if not self._ttinfo_dst: + return ZERO + + tti = self._find_ttinfo(dt) + + if not tti.isdst: + return ZERO + + # The documentation says that utcoffset()-dst() must + # be constant for every dt. + return tti.dstoffset + + @tzname_in_python2 + def tzname(self, dt): + if not self._ttinfo_std or dt is None: + return None + return self._find_ttinfo(dt).abbr + + def __eq__(self, other): + if not isinstance(other, tzfile): + return NotImplemented + return (self._trans_list == other._trans_list and + self._trans_idx == other._trans_idx and + self._ttinfo_list == other._ttinfo_list) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) + + def __reduce__(self): + return self.__reduce_ex__(None) + + def __reduce_ex__(self, protocol): + return (self.__class__, (None, self._filename), self.__dict__) + + +class tzrange(tzrangebase): + """ + The ``tzrange`` object is a time zone specified by a set of offsets and + abbreviations, equivalent to the way the ``TZ`` variable can be specified + in POSIX-like systems, but using Python delta objects to specify DST + start, end and offsets. + + :param stdabbr: + The abbreviation for standard time (e.g. ``'EST'``). + + :param stdoffset: + An integer or :class:`datetime.timedelta` object or equivalent + specifying the base offset from UTC. + + If unspecified, +00:00 is used. + + :param dstabbr: + The abbreviation for DST / "Summer" time (e.g. ``'EDT'``). + + If specified, with no other DST information, DST is assumed to occur + and the default behavior or ``dstoffset``, ``start`` and ``end`` is + used. 
If unspecified and no other DST information is specified, it + is assumed that this zone has no DST. + + If this is unspecified and other DST information is *is* specified, + DST occurs in the zone but the time zone abbreviation is left + unchanged. + + :param dstoffset: + A an integer or :class:`datetime.timedelta` object or equivalent + specifying the UTC offset during DST. If unspecified and any other DST + information is specified, it is assumed to be the STD offset +1 hour. + + :param start: + A :class:`relativedelta.relativedelta` object or equivalent specifying + the time and time of year that daylight savings time starts. To + specify, for example, that DST starts at 2AM on the 2nd Sunday in + March, pass: + + ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))`` + + If unspecified and any other DST information is specified, the default + value is 2 AM on the first Sunday in April. + + :param end: + A :class:`relativedelta.relativedelta` object or equivalent + representing the time and time of year that daylight savings time + ends, with the same specification method as in ``start``. One note is + that this should point to the first time in the *standard* zone, so if + a transition occurs at 2AM in the DST zone and the clocks are set back + 1 hour to 1AM, set the ``hours`` parameter to +1. + + + **Examples:** + + .. testsetup:: tzrange + + from dateutil.tz import tzrange, tzstr + + .. doctest:: tzrange + + >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") + True + + >>> from dateutil.relativedelta import * + >>> range1 = tzrange("EST", -18000, "EDT") + >>> range2 = tzrange("EST", -18000, "EDT", -14400, + ... relativedelta(hours=+2, month=4, day=1, + ... weekday=SU(+1)), + ... relativedelta(hours=+1, month=10, day=31, + ... 
weekday=SU(-1))) + >>> tzstr('EST5EDT') == range1 == range2 + True + + """ + def __init__(self, stdabbr, stdoffset=None, + dstabbr=None, dstoffset=None, + start=None, end=None): + + global relativedelta + from dateutil import relativedelta + + self._std_abbr = stdabbr + self._dst_abbr = dstabbr + + try: + stdoffset = stdoffset.total_seconds() + except (TypeError, AttributeError): + pass + + try: + dstoffset = dstoffset.total_seconds() + except (TypeError, AttributeError): + pass + + if stdoffset is not None: + self._std_offset = datetime.timedelta(seconds=stdoffset) + else: + self._std_offset = ZERO + + if dstoffset is not None: + self._dst_offset = datetime.timedelta(seconds=dstoffset) + elif dstabbr and stdoffset is not None: + self._dst_offset = self._std_offset + datetime.timedelta(hours=+1) + else: + self._dst_offset = ZERO + + if dstabbr and start is None: + self._start_delta = relativedelta.relativedelta( + hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) + else: + self._start_delta = start + + if dstabbr and end is None: + self._end_delta = relativedelta.relativedelta( + hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) + else: + self._end_delta = end + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = bool(self._start_delta) + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. 
+ """ + if not self.hasdst: + return None + + base_year = datetime.datetime(year, 1, 1) + + start = base_year + self._start_delta + end = base_year + self._end_delta + + return (start, end) + + def __eq__(self, other): + if not isinstance(other, tzrange): + return NotImplemented + + return (self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr and + self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._start_delta == other._start_delta and + self._end_delta == other._end_delta) + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +@six.add_metaclass(_TzStrFactory) +class tzstr(tzrange): + """ + ``tzstr`` objects are time zone objects specified by a time-zone string as + it would be passed to a ``TZ`` variable on POSIX-style systems (see + the `GNU C Library: TZ Variable`_ for more details). + + There is one notable exception, which is that POSIX-style time zones use an + inverted offset format, so normally ``GMT+3`` would be parsed as an offset + 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an + offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX + behavior, pass a ``True`` value to ``posix_offset``. + + The :class:`tzrange` object provides the same functionality, but is + specified using :class:`relativedelta.relativedelta` objects. rather than + strings. + + :param s: + A time zone string in ``TZ`` variable format. This can be a + :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: + :class:`unicode`) or a stream emitting unicode characters + (e.g. :class:`StringIO`). + + :param posix_offset: + Optional. If set to ``True``, interpret strings such as ``GMT+3`` or + ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the + POSIX standard. + + .. 
caution:: + + Prior to version 2.7.0, this function also supported time zones + in the format: + + * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600`` + * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600`` + + This format is non-standard and has been deprecated; this function + will raise a :class:`DeprecatedTZFormatWarning` until + support is removed in a future version. + + .. _`GNU C Library: TZ Variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + """ + def __init__(self, s, posix_offset=False): + global parser + from dateutil.parser import _parser as parser + + self._s = s + + res = parser._parsetz(s) + if res is None or res.any_unused_tokens: + raise ValueError("unknown string format") + + # Here we break the compatibility with the TZ variable handling. + # GMT-3 actually *means* the timezone -3. + if res.stdabbr in ("GMT", "UTC") and not posix_offset: + res.stdoffset *= -1 + + # We must initialize it first, since _delta() needs + # _std_offset and _dst_offset set. Use False in start/end + # to avoid building it two times. + tzrange.__init__(self, res.stdabbr, res.stdoffset, + res.dstabbr, res.dstoffset, + start=False, end=False) + + if not res.dstabbr: + self._start_delta = None + self._end_delta = None + else: + self._start_delta = self._delta(res.start) + if self._start_delta: + self._end_delta = self._delta(res.end, isend=1) + + self.hasdst = bool(self._start_delta) + + def _delta(self, x, isend=0): + from dateutil import relativedelta + kwargs = {} + if x.month is not None: + kwargs["month"] = x.month + if x.weekday is not None: + kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) + if x.week > 0: + kwargs["day"] = 1 + else: + kwargs["day"] = 31 + elif x.day: + kwargs["day"] = x.day + elif x.yday is not None: + kwargs["yearday"] = x.yday + elif x.jyday is not None: + kwargs["nlyearday"] = x.jyday + if not kwargs: + # Default is to start on first sunday of april, and end + # on last sunday of october. 
+ if not isend: + kwargs["month"] = 4 + kwargs["day"] = 1 + kwargs["weekday"] = relativedelta.SU(+1) + else: + kwargs["month"] = 10 + kwargs["day"] = 31 + kwargs["weekday"] = relativedelta.SU(-1) + if x.time is not None: + kwargs["seconds"] = x.time + else: + # Default is 2AM. + kwargs["seconds"] = 7200 + if isend: + # Convert to standard time, to follow the documented way + # of working with the extra hour. See the documentation + # of the tzinfo class. + delta = self._dst_offset - self._std_offset + kwargs["seconds"] -= delta.seconds + delta.days * 86400 + return relativedelta.relativedelta(**kwargs) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +class _tzicalvtzcomp(object): + def __init__(self, tzoffsetfrom, tzoffsetto, isdst, + tzname=None, rrule=None): + self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) + self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) + self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom + self.isdst = isdst + self.tzname = tzname + self.rrule = rrule + + +class _tzicalvtz(_tzinfo): + def __init__(self, tzid, comps=[]): + super(_tzicalvtz, self).__init__() + + self._tzid = tzid + self._comps = comps + self._cachedate = [] + self._cachecomp = [] + self._cache_lock = _thread.allocate_lock() + + def _find_comp(self, dt): + if len(self._comps) == 1: + return self._comps[0] + + dt = dt.replace(tzinfo=None) + + try: + with self._cache_lock: + return self._cachecomp[self._cachedate.index( + (dt, self._fold(dt)))] + except ValueError: + pass + + lastcompdt = None + lastcomp = None + + for comp in self._comps: + compdt = self._find_compdt(comp, dt) + + if compdt and (not lastcompdt or lastcompdt < compdt): + lastcompdt = compdt + lastcomp = comp + + if not lastcomp: + # RFC says nothing about what to do when a given + # time is before the first onset date. We'll look for the + # first standard component, or the first component, if + # none is found. 
+ for comp in self._comps: + if not comp.isdst: + lastcomp = comp + break + else: + lastcomp = comp[0] + + with self._cache_lock: + self._cachedate.insert(0, (dt, self._fold(dt))) + self._cachecomp.insert(0, lastcomp) + + if len(self._cachedate) > 10: + self._cachedate.pop() + self._cachecomp.pop() + + return lastcomp + + def _find_compdt(self, comp, dt): + if comp.tzoffsetdiff < ZERO and self._fold(dt): + dt -= comp.tzoffsetdiff + + compdt = comp.rrule.before(dt, inc=True) + + return compdt + + def utcoffset(self, dt): + if dt is None: + return None + + return self._find_comp(dt).tzoffsetto + + def dst(self, dt): + comp = self._find_comp(dt) + if comp.isdst: + return comp.tzoffsetdiff + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._find_comp(dt).tzname + + def __repr__(self): + return "" % repr(self._tzid) + + __reduce__ = object.__reduce__ + + +class tzical(object): + """ + This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure + as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects. + + :param `fileobj`: + A file or stream in iCalendar format, which should be UTF-8 encoded + with CRLF endings. + + .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545 + """ + def __init__(self, fileobj): + global rrule + from dateutil import rrule + + if isinstance(fileobj, string_types): + self._s = fileobj + # ical should be encoded in UTF-8 with CRLF + fileobj = open(fileobj, 'r') + else: + self._s = getattr(fileobj, 'name', repr(fileobj)) + fileobj = _nullcontext(fileobj) + + self._vtz = {} + + with fileobj as fobj: + self._parse_rfc(fobj.read()) + + def keys(self): + """ + Retrieves the available time zones as a list. + """ + return list(self._vtz.keys()) + + def get(self, tzid=None): + """ + Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``. + + :param tzid: + If there is exactly one time zone available, omitting ``tzid`` + or passing :py:const:`None` value returns it. 
Otherwise a valid + key (which can be retrieved from :func:`keys`) is required. + + :raises ValueError: + Raised if ``tzid`` is not specified but there are either more + or fewer than 1 zone defined. + + :returns: + Returns either a :py:class:`datetime.tzinfo` object representing + the relevant time zone or :py:const:`None` if the ``tzid`` was + not found. + """ + if tzid is None: + if len(self._vtz) == 0: + raise ValueError("no timezones defined") + elif len(self._vtz) > 1: + raise ValueError("more than one timezone available") + tzid = next(iter(self._vtz)) + + return self._vtz.get(tzid) + + def _parse_offset(self, s): + s = s.strip() + if not s: + raise ValueError("empty offset") + if s[0] in ('+', '-'): + signal = (-1, +1)[s[0] == '+'] + s = s[1:] + else: + signal = +1 + if len(s) == 4: + return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal + elif len(s) == 6: + return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal + else: + raise ValueError("invalid offset: " + s) + + def _parse_rfc(self, s): + lines = s.splitlines() + if not lines: + raise ValueError("empty string") + + # Unfold + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + + tzid = None + comps = [] + invtz = False + comptype = None + for line in lines: + if not line: + continue + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0].upper() + parms = parms[1:] + if invtz: + if name == "BEGIN": + if value in ("STANDARD", "DAYLIGHT"): + # Process component + pass + else: + raise ValueError("unknown component: "+value) + comptype = value + founddtstart = False + tzoffsetfrom = None + tzoffsetto = None + rrulelines = [] + tzname = None + elif name == "END": + if value == "VTIMEZONE": + if comptype: + raise ValueError("component not closed: "+comptype) + if not tzid: + raise 
ValueError("mandatory TZID not found") + if not comps: + raise ValueError( + "at least one component is needed") + # Process vtimezone + self._vtz[tzid] = _tzicalvtz(tzid, comps) + invtz = False + elif value == comptype: + if not founddtstart: + raise ValueError("mandatory DTSTART not found") + if tzoffsetfrom is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + if tzoffsetto is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + # Process component + rr = None + if rrulelines: + rr = rrule.rrulestr("\n".join(rrulelines), + compatible=True, + ignoretz=True, + cache=True) + comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, + (comptype == "DAYLIGHT"), + tzname, rr) + comps.append(comp) + comptype = None + else: + raise ValueError("invalid component end: "+value) + elif comptype: + if name == "DTSTART": + # DTSTART in VTIMEZONE takes a subset of valid RRULE + # values under RFC 5545. + for parm in parms: + if parm != 'VALUE=DATE-TIME': + msg = ('Unsupported DTSTART param in ' + + 'VTIMEZONE: ' + parm) + raise ValueError(msg) + rrulelines.append(line) + founddtstart = True + elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): + rrulelines.append(line) + elif name == "TZOFFSETFROM": + if parms: + raise ValueError( + "unsupported %s parm: %s " % (name, parms[0])) + tzoffsetfrom = self._parse_offset(value) + elif name == "TZOFFSETTO": + if parms: + raise ValueError( + "unsupported TZOFFSETTO parm: "+parms[0]) + tzoffsetto = self._parse_offset(value) + elif name == "TZNAME": + if parms: + raise ValueError( + "unsupported TZNAME parm: "+parms[0]) + tzname = value + elif name == "COMMENT": + pass + else: + raise ValueError("unsupported property: "+name) + else: + if name == "TZID": + if parms: + raise ValueError( + "unsupported TZID parm: "+parms[0]) + tzid = value + elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): + pass + else: + raise ValueError("unsupported property: "+name) + elif name == "BEGIN" and value == "VTIMEZONE": + tzid = None + 
comps = [] + invtz = True + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +if sys.platform != "win32": + TZFILES = ["/etc/localtime", "localtime"] + TZPATHS = ["/usr/share/zoneinfo", + "/usr/lib/zoneinfo", + "/usr/share/lib/zoneinfo", + "/etc/zoneinfo"] +else: + TZFILES = [] + TZPATHS = [] + + +def __get_gettz(): + tzlocal_classes = (tzlocal,) + if tzwinlocal is not None: + tzlocal_classes += (tzwinlocal,) + + class GettzFunc(object): + """ + Retrieve a time zone object from a string representation + + This function is intended to retrieve the :py:class:`tzinfo` subclass + that best represents the time zone that would be used if a POSIX + `TZ variable`_ were set to the same value. + + If no argument or an empty string is passed to ``gettz``, local time + is returned: + + .. code-block:: python3 + + >>> gettz() + tzfile('/etc/localtime') + + This function is also the preferred way to map IANA tz database keys + to :class:`tzfile` objects: + + .. code-block:: python3 + + >>> gettz('Pacific/Kiritimati') + tzfile('/usr/share/zoneinfo/Pacific/Kiritimati') + + On Windows, the standard is extended to include the Windows-specific + zone names provided by the operating system: + + .. code-block:: python3 + + >>> gettz('Egypt Standard Time') + tzwin('Egypt Standard Time') + + Passing a GNU ``TZ`` style string time zone specification returns a + :class:`tzstr` object: + + .. code-block:: python3 + + >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') + tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') + + :param name: + A time zone name (IANA, or, on Windows, Windows keys), location of + a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone + specifier. An empty string, no argument or ``None`` is interpreted + as local time. + + :return: + Returns an instance of one of ``dateutil``'s :py:class:`tzinfo` + subclasses. + + .. 
versionchanged:: 2.7.0 + + After version 2.7.0, any two calls to ``gettz`` using the same + input strings will return the same object: + + .. code-block:: python3 + + >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago') + True + + In addition to improving performance, this ensures that + `"same zone" semantics`_ are used for datetimes in the same zone. + + + .. _`TZ variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + + .. _`"same zone" semantics`: + https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html + """ + def __init__(self): + + self.__instances = weakref.WeakValueDictionary() + self.__strong_cache_size = 8 + self.__strong_cache = OrderedDict() + self._cache_lock = _thread.allocate_lock() + + def __call__(self, name=None): + with self._cache_lock: + rv = self.__instances.get(name, None) + + if rv is None: + rv = self.nocache(name=name) + if not (name is None + or isinstance(rv, tzlocal_classes) + or rv is None): + # tzlocal is slightly more complicated than the other + # time zone providers because it depends on environment + # at construction time, so don't cache that. + # + # We also cannot store weak references to None, so we + # will also not store that. 
+ self.__instances[name] = rv + else: + # No need for strong caching, return immediately + return rv + + self.__strong_cache[name] = self.__strong_cache.pop(name, rv) + + if len(self.__strong_cache) > self.__strong_cache_size: + self.__strong_cache.popitem(last=False) + + return rv + + def set_cache_size(self, size): + with self._cache_lock: + self.__strong_cache_size = size + while len(self.__strong_cache) > size: + self.__strong_cache.popitem(last=False) + + def cache_clear(self): + with self._cache_lock: + self.__instances = weakref.WeakValueDictionary() + self.__strong_cache.clear() + + @staticmethod + def nocache(name=None): + """A non-cached version of gettz""" + tz = None + if not name: + try: + name = os.environ["TZ"] + except KeyError: + pass + if name is None or name in ("", ":"): + for filepath in TZFILES: + if not os.path.isabs(filepath): + filename = filepath + for path in TZPATHS: + filepath = os.path.join(path, filename) + if os.path.isfile(filepath): + break + else: + continue + if os.path.isfile(filepath): + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = tzlocal() + else: + try: + if name.startswith(":"): + name = name[1:] + except TypeError as e: + if isinstance(name, bytes): + new_msg = "gettz argument should be str, not bytes" + six.raise_from(TypeError(new_msg), e) + else: + raise + if os.path.isabs(name): + if os.path.isfile(name): + tz = tzfile(name) + else: + tz = None + else: + for path in TZPATHS: + filepath = os.path.join(path, name) + if not os.path.isfile(filepath): + filepath = filepath.replace(' ', '_') + if not os.path.isfile(filepath): + continue + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = None + if tzwin is not None: + try: + tz = tzwin(name) + except (WindowsError, UnicodeEncodeError): + # UnicodeEncodeError is for Python 2.7 compat + tz = None + + if not tz: + from dateutil.zoneinfo import get_zonefile_instance + tz = 
get_zonefile_instance().get(name) + + if not tz: + for c in name: + # name is not a tzstr unless it has at least + # one offset. For short values of "name", an + # explicit for loop seems to be the fastest way + # To determine if a string contains a digit + if c in "0123456789": + try: + tz = tzstr(name) + except ValueError: + pass + break + else: + if name in ("GMT", "UTC"): + tz = UTC + elif name in time.tzname: + tz = tzlocal() + return tz + + return GettzFunc() + + +gettz = __get_gettz() +del __get_gettz + + +def datetime_exists(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + would fall in a gap. + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" exists in + ``tz``. + + .. versionadded:: 2.7.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + tz = dt.tzinfo + + dt = dt.replace(tzinfo=None) + + # This is essentially a test of whether or not the datetime can survive + # a round trip to UTC. + dt_rt = dt.replace(tzinfo=tz).astimezone(UTC).astimezone(tz) + dt_rt = dt_rt.replace(tzinfo=None) + + return dt == dt_rt + + +def datetime_ambiguous(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + is ambiguous (i.e if there are two times differentiated only by their DST + status). + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. 
+ + :return: + Returns a boolean value whether or not the "wall time" is ambiguous in + ``tz``. + + .. versionadded:: 2.6.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + + tz = dt.tzinfo + + # If a time zone defines its own "is_ambiguous" function, we'll use that. + is_ambiguous_fn = getattr(tz, 'is_ambiguous', None) + if is_ambiguous_fn is not None: + try: + return tz.is_ambiguous(dt) + except Exception: + pass + + # If it doesn't come out and tell us it's ambiguous, we'll just check if + # the fold attribute has any effect on this particular date and time. + dt = dt.replace(tzinfo=tz) + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dst = wall_0.dst() == wall_1.dst() + + return not (same_offset and same_dst) + + +def resolve_imaginary(dt): + """ + Given a datetime that may be imaginary, return an existing datetime. + + This function assumes that an imaginary datetime represents what the + wall time would be in a zone had the offset transition not occurred, so + it will always fall forward by the transition's change in offset. + + .. doctest:: + + >>> from dateutil import tz + >>> from datetime import datetime + >>> NYC = tz.gettz('America/New_York') + >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC))) + 2017-03-12 03:30:00-04:00 + + >>> KIR = tz.gettz('Pacific/Kiritimati') + >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR))) + 1995-01-02 12:30:00+14:00 + + As a note, :func:`datetime.astimezone` is guaranteed to produce a valid, + existing datetime, so a round-trip to and from UTC is sufficient to get + an extant datetime, however, this generally "falls back" to an earlier time + rather than falling forward to the STD side (though no guarantees are made + about this behavior). + + :param dt: + A :class:`datetime.datetime` which may or may not exist. 
+ + :return: + Returns an existing :class:`datetime.datetime`. If ``dt`` was not + imaginary, the datetime returned is guaranteed to be the same object + passed to the function. + + .. versionadded:: 2.7.0 + """ + if dt.tzinfo is not None and not datetime_exists(dt): + + curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset() + old_offset = (dt - datetime.timedelta(hours=24)).utcoffset() + + dt += curr_offset - old_offset + + return dt + + +def _datetime_to_timestamp(dt): + """ + Convert a :class:`datetime.datetime` object to an epoch timestamp in + seconds since January 1, 1970, ignoring the time zone. + """ + return (dt.replace(tzinfo=None) - EPOCH).total_seconds() + + +if sys.version_info >= (3, 6): + def _get_supported_offset(second_offset): + return second_offset +else: + def _get_supported_offset(second_offset): + # For python pre-3.6, round to full-minutes if that's not the case. + # Python's datetime doesn't accept sub-minute timezones. Check + # http://python.org/sf/1447945 or https://bugs.python.org/issue5288 + # for some information. + old_offset = second_offset + calculated_offset = 60 * ((second_offset + 30) // 60) + return calculated_offset + + +try: + # Python 3.7 feature + from contextlib import nullcontext as _nullcontext +except ImportError: + class _nullcontext(object): + """ + Class for wrapping contexts so that they are passed through in a + with statement. 
+ """ + def __init__(self, context): + self.context = context + + def __enter__(self): + return self.context + + def __exit__(*args, **kwargs): + pass + +# vim:ts=4:sw=4:et diff --git a/third_party/python/python_dateutil/dateutil/tz/win.py b/third_party/python/python_dateutil/dateutil/tz/win.py new file mode 100644 index 0000000000000..cde07ba792c40 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/tz/win.py @@ -0,0 +1,370 @@ +# -*- coding: utf-8 -*- +""" +This module provides an interface to the native time zone data on Windows, +including :py:class:`datetime.tzinfo` implementations. + +Attempting to import this module on a non-Windows platform will raise an +:py:obj:`ImportError`. +""" +# This code was originally contributed by Jeffrey Harris. +import datetime +import struct + +from six.moves import winreg +from six import text_type + +try: + import ctypes + from ctypes import wintypes +except ValueError: + # ValueError is raised on non-Windows systems for some horrible reason. + raise ImportError("Running tzwin on non-Windows system") + +from ._common import tzrangebase + +__all__ = ["tzwin", "tzwinlocal", "tzres"] + +ONEWEEK = datetime.timedelta(7) + +TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" +TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" +TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" + + +def _settzkeyname(): + handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) + try: + winreg.OpenKey(handle, TZKEYNAMENT).Close() + TZKEYNAME = TZKEYNAMENT + except WindowsError: + TZKEYNAME = TZKEYNAME9X + handle.Close() + return TZKEYNAME + + +TZKEYNAME = _settzkeyname() + + +class tzres(object): + """ + Class for accessing ``tzres.dll``, which contains timezone name related + resources. + + .. 
versionadded:: 2.5.0 + """ + p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char + + def __init__(self, tzres_loc='tzres.dll'): + # Load the user32 DLL so we can load strings from tzres + user32 = ctypes.WinDLL('user32') + + # Specify the LoadStringW function + user32.LoadStringW.argtypes = (wintypes.HINSTANCE, + wintypes.UINT, + wintypes.LPWSTR, + ctypes.c_int) + + self.LoadStringW = user32.LoadStringW + self._tzres = ctypes.WinDLL(tzres_loc) + self.tzres_loc = tzres_loc + + def load_name(self, offset): + """ + Load a timezone name from a DLL offset (integer). + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.load_name(112)) + 'Eastern Standard Time' + + :param offset: + A positive integer value referring to a string from the tzres dll. + + .. note:: + + Offsets found in the registry are generally of the form + ``@tzres.dll,-114``. The offset in this case is 114, not -114. + + """ + resource = self.p_wchar() + lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) + nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) + return resource[:nchar] + + def name_from_string(self, tzname_str): + """ + Parse strings as returned from the Windows registry into the time zone + name as defined in the registry. + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.name_from_string('@tzres.dll,-251')) + 'Dateline Daylight Time' + >>> print(tzr.name_from_string('Eastern Standard Time')) + 'Eastern Standard Time' + + :param tzname_str: + A timezone name string as returned from a Windows registry key. + + :return: + Returns the localized timezone string from tzres.dll if the string + is of the form `@tzres.dll,-offset`, else returns the input string. 
+ """ + if not tzname_str.startswith('@'): + return tzname_str + + name_splt = tzname_str.split(',-') + try: + offset = int(name_splt[1]) + except: + raise ValueError("Malformed timezone string.") + + return self.load_name(offset) + + +class tzwinbase(tzrangebase): + """tzinfo class based on win32's timezones available in the registry.""" + def __init__(self): + raise NotImplementedError('tzwinbase is an abstract base class') + + def __eq__(self, other): + # Compare on all relevant dimensions, including name. + if not isinstance(other, tzwinbase): + return NotImplemented + + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._stddayofweek == other._stddayofweek and + self._dstdayofweek == other._dstdayofweek and + self._stdweeknumber == other._stdweeknumber and + self._dstweeknumber == other._dstweeknumber and + self._stdhour == other._stdhour and + self._dsthour == other._dsthour and + self._stdminute == other._stdminute and + self._dstminute == other._dstminute and + self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr) + + @staticmethod + def list(): + """Return a list of all time zones known to the system.""" + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZKEYNAME) as tzkey: + result = [winreg.EnumKey(tzkey, i) + for i in range(winreg.QueryInfoKey(tzkey)[0])] + return result + + def display(self): + """ + Return the display name of the time zone. + """ + return self._display + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. 
+ """ + + if not self.hasdst: + return None + + dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, + self._dsthour, self._dstminute, + self._dstweeknumber) + + dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, + self._stdhour, self._stdminute, + self._stdweeknumber) + + # Ambiguous dates default to the STD side + dstoff -= self._dst_base_offset + + return dston, dstoff + + def _get_hasdst(self): + return self._dstmonth != 0 + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +class tzwin(tzwinbase): + """ + Time zone object created from the zone info in the Windows registry + + These are similar to :py:class:`dateutil.tz.tzrange` objects in that + the time zone data is provided in the format of a single offset rule + for either 0 or 2 time zone transitions per year. + + :param: name + The name of a Windows time zone key, e.g. "Eastern Standard Time". + The full list of keys can be retrieved with :func:`tzwin.list`. + """ + + def __init__(self, name): + self._name = name + + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + keydict = valuestodict(tzkey) + + self._std_abbr = keydict["Std"] + self._dst_abbr = keydict["Dlt"] + + self._display = keydict["Display"] + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=3l16h", keydict["TZI"]) + stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 + dstoffset = stdoffset-tup[2] # + DaylightBias * -1 + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs + # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) 
= tup[4:9] + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[12:17] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwin(%s)" % repr(self._name) + + def __reduce__(self): + return (self.__class__, (self._name,)) + + +class tzwinlocal(tzwinbase): + """ + Class representing the local time zone information in the Windows registry + + While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time` + module) to retrieve time zone information, ``tzwinlocal`` retrieves the + rules directly from the Windows registry and creates an object like + :class:`dateutil.tz.tzwin`. + + Because Windows does not have an equivalent of :func:`time.tzset`, on + Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the + time zone settings *at the time that the process was started*, meaning + changes to the machine's time zone settings during the run of a program + on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`. + Because ``tzwinlocal`` reads the registry directly, it is unaffected by + this issue. 
+ """ + def __init__(self): + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: + keydict = valuestodict(tzlocalkey) + + self._std_abbr = keydict["StandardName"] + self._dst_abbr = keydict["DaylightName"] + + try: + tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, + sn=self._std_abbr) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + _keydict = valuestodict(tzkey) + self._display = _keydict["Display"] + except OSError: + self._display = None + + stdoffset = -keydict["Bias"]-keydict["StandardBias"] + dstoffset = stdoffset-keydict["DaylightBias"] + + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # For reasons unclear, in this particular key, the day of week has been + # moved to the END of the SYSTEMTIME structure. + tup = struct.unpack("=8h", keydict["StandardStart"]) + + (self._stdmonth, + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[1:5] + + self._stddayofweek = tup[7] + + tup = struct.unpack("=8h", keydict["DaylightStart"]) + + (self._dstmonth, + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[1:5] + + self._dstdayofweek = tup[7] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwinlocal()" + + def __str__(self): + # str will return the standard name, not the daylight name. 
+ return "tzwinlocal(%s)" % repr(self._std_abbr) + + def __reduce__(self): + return (self.__class__, ()) + + +def picknthweekday(year, month, dayofweek, hour, minute, whichweek): + """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ + first = datetime.datetime(year, month, 1, hour, minute) + + # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), + # Because 7 % 7 = 0 + weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) + wd = weekdayone + ((whichweek - 1) * ONEWEEK) + if (wd.month != month): + wd -= ONEWEEK + + return wd + + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dout = {} + size = winreg.QueryInfoKey(key)[1] + tz_res = None + + for i in range(size): + key_name, value, dtype = winreg.EnumValue(key, i) + if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: + # If it's a DWORD (32-bit integer), it's stored as unsigned - convert + # that to a proper signed integer + if value & (1 << 31): + value = value - (1 << 32) + elif dtype == winreg.REG_SZ: + # If it's a reference to the tzres DLL, load the actual string + if value.startswith('@tzres'): + tz_res = tz_res or tzres() + value = tz_res.name_from_string(value) + + value = value.rstrip('\x00') # Remove trailing nulls + + dout[key_name] = value + + return dout diff --git a/third_party/python/python_dateutil/dateutil/tzwin.py b/third_party/python/python_dateutil/dateutil/tzwin.py new file mode 100644 index 0000000000000..cebc673e40fc3 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/tzwin.py @@ -0,0 +1,2 @@ +# tzwin has moved to dateutil.tz.win +from .tz.win import * diff --git a/third_party/python/python_dateutil/dateutil/utils.py b/third_party/python/python_dateutil/dateutil/utils.py new file mode 100644 index 0000000000000..dd2d245a0bebc --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/utils.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" +This module offers 
general convenience and utility functions for dealing with +datetimes. + +.. versionadded:: 2.7.0 +""" +from __future__ import unicode_literals + +from datetime import datetime, time + + +def today(tzinfo=None): + """ + Returns a :py:class:`datetime` representing the current day at midnight + + :param tzinfo: + The time zone to attach (also used to determine the current day). + + :return: + A :py:class:`datetime.datetime` object representing the current day + at midnight. + """ + + dt = datetime.now(tzinfo) + return datetime.combine(dt.date(), time(0, tzinfo=tzinfo)) + + +def default_tzinfo(dt, tzinfo): + """ + Sets the ``tzinfo`` parameter on naive datetimes only + + This is useful for example when you are provided a datetime that may have + either an implicit or explicit time zone, such as when parsing a time zone + string. + + .. doctest:: + + >>> from dateutil.tz import tzoffset + >>> from dateutil.parser import parse + >>> from dateutil.utils import default_tzinfo + >>> dflt_tz = tzoffset("EST", -18000) + >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz)) + 2014-01-01 12:30:00+00:00 + >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz)) + 2014-01-01 12:30:00-05:00 + + :param dt: + The datetime on which to replace the time zone + + :param tzinfo: + The :py:class:`datetime.tzinfo` subclass instance to assign to + ``dt`` if (and only if) it is naive. + + :return: + Returns an aware :py:class:`datetime.datetime`. + """ + if dt.tzinfo is not None: + return dt + else: + return dt.replace(tzinfo=tzinfo) + + +def within_delta(dt1, dt2, delta): + """ + Useful for comparing two datetimes that may have a negligible difference + to be considered equal. 
+ """ + delta = abs(delta) + difference = dt1 - dt2 + return -delta <= difference <= delta diff --git a/third_party/python/python_dateutil/dateutil/zoneinfo/__init__.py b/third_party/python/python_dateutil/dateutil/zoneinfo/__init__.py new file mode 100644 index 0000000000000..34f11ad66c880 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/zoneinfo/__init__.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +import warnings +import json + +from tarfile import TarFile +from pkgutil import get_data +from io import BytesIO + +from dateutil.tz import tzfile as _tzfile + +__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"] + +ZONEFILENAME = "dateutil-zoneinfo.tar.gz" +METADATA_FN = 'METADATA' + + +class tzfile(_tzfile): + def __reduce__(self): + return (gettz, (self._filename,)) + + +def getzoneinfofile_stream(): + try: + return BytesIO(get_data(__name__, ZONEFILENAME)) + except IOError as e: # TODO switch to FileNotFoundError? + warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror)) + return None + + +class ZoneInfoFile(object): + def __init__(self, zonefile_stream=None): + if zonefile_stream is not None: + with TarFile.open(fileobj=zonefile_stream) as tf: + self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name) + for zf in tf.getmembers() + if zf.isfile() and zf.name != METADATA_FN} + # deal with links: They'll point to their parent object. Less + # waste of memory + links = {zl.name: self.zones[zl.linkname] + for zl in tf.getmembers() if + zl.islnk() or zl.issym()} + self.zones.update(links) + try: + metadata_json = tf.extractfile(tf.getmember(METADATA_FN)) + metadata_str = metadata_json.read().decode('UTF-8') + self.metadata = json.loads(metadata_str) + except KeyError: + # no metadata in tar file + self.metadata = None + else: + self.zones = {} + self.metadata = None + + def get(self, name, default=None): + """ + Wrapper for :func:`ZoneInfoFile.zones.get`. 
This is a convenience method + for retrieving zones from the zone dictionary. + + :param name: + The name of the zone to retrieve. (Generally IANA zone names) + + :param default: + The value to return in the event of a missing key. + + .. versionadded:: 2.6.0 + + """ + return self.zones.get(name, default) + + +# The current API has gettz as a module function, although in fact it taps into +# a stateful class. So as a workaround for now, without changing the API, we +# will create a new "global" class instance the first time a user requests a +# timezone. Ugly, but adheres to the api. +# +# TODO: Remove after deprecation period. +_CLASS_ZONE_INSTANCE = [] + + +def get_zonefile_instance(new_instance=False): + """ + This is a convenience function which provides a :class:`ZoneInfoFile` + instance using the data provided by the ``dateutil`` package. By default, it + caches a single instance of the ZoneInfoFile object and returns that. + + :param new_instance: + If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and + used as the cached instance for the next call. Otherwise, new instances + are created only as necessary. + + :return: + Returns a :class:`ZoneInfoFile` object. + + .. versionadded:: 2.6 + """ + if new_instance: + zif = None + else: + zif = getattr(get_zonefile_instance, '_cached_instance', None) + + if zif is None: + zif = ZoneInfoFile(getzoneinfofile_stream()) + + get_zonefile_instance._cached_instance = zif + + return zif + + +def gettz(name): + """ + This retrieves a time zone from the local zoneinfo tarball that is packaged + with dateutil. + + :param name: + An IANA-style time zone name, as found in the zoneinfo file. + + :return: + Returns a :class:`dateutil.tz.tzfile` time zone object. + + .. warning:: + It is generally inadvisable to use this function, and it is only + provided for API compatibility with earlier versions. 
This is *not* + equivalent to ``dateutil.tz.gettz()``, which selects an appropriate + time zone based on the inputs, favoring system zoneinfo. This is ONLY + for accessing the dateutil-specific zoneinfo (which may be out of + date compared to the system zoneinfo). + + .. deprecated:: 2.6 + If you need to use a specific zoneinfofile over the system zoneinfo, + instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call + :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead. + + Use :func:`get_zonefile_instance` to retrieve an instance of the + dateutil-provided zoneinfo. + """ + warnings.warn("zoneinfo.gettz() will be removed in future versions, " + "to use the dateutil-provided zoneinfo files, instantiate a " + "ZoneInfoFile object and use ZoneInfoFile.zones.get() " + "instead. See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].zones.get(name) + + +def gettz_db_metadata(): + """ Get the zonefile metadata + + See `zonefile_metadata`_ + + :returns: + A dictionary with the database metadata + + .. deprecated:: 2.6 + See deprecation warning in :func:`zoneinfo.gettz`. To get metadata, + query the attribute ``zoneinfo.ZoneInfoFile.metadata``. + """ + warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future " + "versions, to use the dateutil-provided zoneinfo files, " + "ZoneInfoFile object and query the 'metadata' attribute " + "instead. 
See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].metadata diff --git a/third_party/python/python_dateutil/dateutil/zoneinfo/rebuild.py b/third_party/python/python_dateutil/dateutil/zoneinfo/rebuild.py new file mode 100644 index 0000000000000..684c6586f0913 --- /dev/null +++ b/third_party/python/python_dateutil/dateutil/zoneinfo/rebuild.py @@ -0,0 +1,75 @@ +import logging +import os +import tempfile +import shutil +import json +from subprocess import check_call, check_output +from tarfile import TarFile + +from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME + + +def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): + """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* + + filename is the timezone tarball from ``ftp.iana.org/tz``. + + """ + tmpdir = tempfile.mkdtemp() + zonedir = os.path.join(tmpdir, "zoneinfo") + moduledir = os.path.dirname(__file__) + try: + with TarFile.open(filename) as tf: + for name in zonegroups: + tf.extract(name, tmpdir) + filepaths = [os.path.join(tmpdir, n) for n in zonegroups] + + _run_zic(zonedir, filepaths) + + # write metadata file + with open(os.path.join(zonedir, METADATA_FN), 'w') as f: + json.dump(metadata, f, indent=4, sort_keys=True) + target = os.path.join(moduledir, ZONEFILENAME) + with TarFile.open(target, "w:%s" % format) as tf: + for entry in os.listdir(zonedir): + entrypath = os.path.join(zonedir, entry) + tf.add(entrypath, entry) + finally: + shutil.rmtree(tmpdir) + + +def _run_zic(zonedir, filepaths): + """Calls the ``zic`` compiler in a compatible way to get a "fat" binary. + + Recent versions of ``zic`` default to ``-b slim``, while older versions + don't even have the ``-b`` option (but default to "fat" binaries). 
The + current version of dateutil does not support Version 2+ TZif files, which + causes problems when used in conjunction with "slim" binaries, so this + function is used to ensure that we always get a "fat" binary. + """ + + try: + help_text = check_output(["zic", "--help"]) + except OSError as e: + _print_on_nosuchfile(e) + raise + + if b"-b " in help_text: + bloat_args = ["-b", "fat"] + else: + bloat_args = [] + + check_call(["zic"] + bloat_args + ["-d", zonedir] + filepaths) + + +def _print_on_nosuchfile(e): + """Print helpful troubleshooting message + + e is an exception raised by subprocess.check_call() + + """ + if e.errno == 2: + logging.error( + "Could not find zic. Perhaps you need to install " + "libc-bin or some other package that provides it, " + "or it's not in your PATH?") diff --git a/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/LICENSE b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/LICENSE new file mode 100644 index 0000000000000..1e65815cf0b31 --- /dev/null +++ b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/LICENSE @@ -0,0 +1,54 @@ +Copyright 2017- Paul Ganssle +Copyright 2017- dateutil contributors (see AUTHORS file) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +The above license applies to all contributions after 2017-12-01, as well as +all contributions that have been re-licensed (see AUTHORS file for the list of +contributors who have re-licensed their code). 
+-------------------------------------------------------------------------------- +dateutil - Extensions to the standard Python datetime module. + +Copyright (c) 2003-2011 - Gustavo Niemeyer +Copyright (c) 2012-2014 - Tomi Pieviläinen +Copyright (c) 2014-2016 - Yaron de Leeuw +Copyright (c) 2015- - Paul Ganssle +Copyright (c) 2015- - dateutil contributors (see AUTHORS file) + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The above BSD License Applies to all code, even that also covered by Apache 2.0. 
\ No newline at end of file diff --git a/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/METADATA b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/METADATA new file mode 100644 index 0000000000000..1e46c96a44bcc --- /dev/null +++ b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/METADATA @@ -0,0 +1,204 @@ +Metadata-Version: 2.1 +Name: python-dateutil +Version: 2.8.2 +Summary: Extensions to the standard Python datetime module +Home-page: https://github.com/dateutil/dateutil +Author: Gustavo Niemeyer +Author-email: gustavo@niemeyer.net +Maintainer: Paul Ganssle +Maintainer-email: dateutil@python.org +License: Dual License +Project-URL: Documentation, https://dateutil.readthedocs.io/en/stable/ +Project-URL: Source, https://github.com/dateutil/dateutil +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Topic :: Software Development :: Libraries +Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,>=2.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: six (>=1.5) + +dateutil - powerful extensions to datetime +========================================== + +|pypi| |support| |licence| + +|gitter| |readthedocs| + +|travis| |appveyor| |pipelines| |coverage| + +.. 
|pypi| image:: https://img.shields.io/pypi/v/python-dateutil.svg?style=flat-square + :target: https://pypi.org/project/python-dateutil/ + :alt: pypi version + +.. |support| image:: https://img.shields.io/pypi/pyversions/python-dateutil.svg?style=flat-square + :target: https://pypi.org/project/python-dateutil/ + :alt: supported Python version + +.. |travis| image:: https://img.shields.io/travis/dateutil/dateutil/master.svg?style=flat-square&label=Travis%20Build + :target: https://travis-ci.org/dateutil/dateutil + :alt: travis build status + +.. |appveyor| image:: https://img.shields.io/appveyor/ci/dateutil/dateutil/master.svg?style=flat-square&logo=appveyor + :target: https://ci.appveyor.com/project/dateutil/dateutil + :alt: appveyor build status + +.. |pipelines| image:: https://dev.azure.com/pythondateutilazure/dateutil/_apis/build/status/dateutil.dateutil?branchName=master + :target: https://dev.azure.com/pythondateutilazure/dateutil/_build/latest?definitionId=1&branchName=master + :alt: azure pipelines build status + +.. |coverage| image:: https://codecov.io/gh/dateutil/dateutil/branch/master/graphs/badge.svg?branch=master + :target: https://codecov.io/gh/dateutil/dateutil?branch=master + :alt: Code coverage + +.. |gitter| image:: https://badges.gitter.im/dateutil/dateutil.svg + :alt: Join the chat at https://gitter.im/dateutil/dateutil + :target: https://gitter.im/dateutil/dateutil + +.. |licence| image:: https://img.shields.io/pypi/l/python-dateutil.svg?style=flat-square + :target: https://pypi.org/project/python-dateutil/ + :alt: licence + +.. |readthedocs| image:: https://img.shields.io/readthedocs/dateutil/latest.svg?style=flat-square&label=Read%20the%20Docs + :alt: Read the documentation at https://dateutil.readthedocs.io/en/latest/ + :target: https://dateutil.readthedocs.io/en/latest/ + +The `dateutil` module provides powerful extensions to +the standard `datetime` module, available in Python. 
+ +Installation +============ +`dateutil` can be installed from PyPI using `pip` (note that the package name is +different from the importable name):: + + pip install python-dateutil + +Download +======== +dateutil is available on PyPI +https://pypi.org/project/python-dateutil/ + +The documentation is hosted at: +https://dateutil.readthedocs.io/en/stable/ + +Code +==== +The code and issue tracker are hosted on GitHub: +https://github.com/dateutil/dateutil/ + +Features +======== + +* Computing of relative deltas (next month, next year, + next Monday, last week of month, etc); +* Computing of relative deltas between two given + date and/or datetime objects; +* Computing of dates based on very flexible recurrence rules, + using a superset of the `iCalendar <https://tools.ietf.org/html/rfc5545>`_ + specification. Parsing of RFC strings is supported as well. +* Generic parsing of dates in almost any string format; +* Timezone (tzinfo) implementations for tzfile(5) format + files (/etc/localtime, /usr/share/zoneinfo, etc), TZ + environment string (in all known formats), iCalendar + format files, given ranges (with help from relative deltas), + local machine timezone, fixed offset timezone, UTC timezone, + and Windows registry-based time zones. +* Internal up-to-date world timezone information based on + Olson's database. +* Computing of Easter Sunday dates for any given year, + using Western, Orthodox or Julian algorithms; +* A comprehensive test suite. + +Quick example +============= +Here's a snapshot, just to give an idea about the power of the +package. For more examples, look at the documentation. + +Suppose you want to know how much time is left, in +years/months/days/etc, before the next easter happening on a +year with a Friday 13th in August, and you want to get today's +date out of the "date" unix system command. Here is the code: + +..
code-block:: python3 + + >>> from dateutil.relativedelta import * + >>> from dateutil.easter import * + >>> from dateutil.rrule import * + >>> from dateutil.parser import * + >>> from datetime import * + >>> now = parse("Sat Oct 11 17:13:46 UTC 2003") + >>> today = now.date() + >>> year = rrule(YEARLY,dtstart=now,bymonth=8,bymonthday=13,byweekday=FR)[0].year + >>> rdelta = relativedelta(easter(year), today) + >>> print("Today is: %s" % today) + Today is: 2003-10-11 + >>> print("Year with next Aug 13th on a Friday is: %s" % year) + Year with next Aug 13th on a Friday is: 2004 + >>> print("How far is the Easter of that year: %s" % rdelta) + How far is the Easter of that year: relativedelta(months=+6) + >>> print("And the Easter of that year is: %s" % (today+rdelta)) + And the Easter of that year is: 2004-04-11 + +Being exactly 6 months ahead was **really** a coincidence :) + +Contributing +============ + +We welcome many types of contributions - bug reports, pull requests (code, infrastructure or documentation fixes). For more information about how to contribute to the project, see the ``CONTRIBUTING.md`` file in the repository. + + +Author +====== +The dateutil module was written by Gustavo Niemeyer +in 2003. + +It is maintained by: + +* Gustavo Niemeyer 2003-2011 +* Tomi Pieviläinen 2012-2014 +* Yaron de Leeuw 2014-2016 +* Paul Ganssle 2015- + +Starting with version 2.4.1 and running until 2.8.2, all source and binary +distributions will be signed by a PGP key that has, at the very least, been +signed by the key which made the previous release. A table of release signing +keys can be found below: + +=========== ============================ +Releases Signing key fingerprint +=========== ============================ +2.4.1-2.8.2 `6B49 ACBA DCF6 BD1C A206 67AB CD54 FCE3 D964 BEFB`_ +=========== ============================ + +New releases *may* have signed tags, but binary and source distributions +uploaded to PyPI will no longer have GPG signatures attached. 
+ +Contact +======= +Our mailing list is available at `dateutil@python.org <https://mail.python.org/mailman3/lists/dateutil.python.org/>`_. As it is hosted by the PSF, it is subject to the `PSF code of +conduct <https://www.python.org/psf/conduct/>`_. + +License +======= + +All contributions after December 1, 2017 released under dual license - either `Apache 2.0 License <https://www.apache.org/licenses/LICENSE-2.0>`_ or the `BSD 3-Clause License <https://opensource.org/licenses/BSD-3-Clause>`_. Contributions before December 1, 2017 - except those explicitly relicensed - are released only under the BSD 3-Clause License. + + +.. _6B49 ACBA DCF6 BD1C A206 67AB CD54 FCE3 D964 BEFB: + https://pgp.mit.edu/pks/lookup?op=vindex&search=0xCD54FCE3D964BEFB + + + diff --git a/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/RECORD b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/RECORD new file mode 100644 index 0000000000000..d03ea8c3398ef --- /dev/null +++ b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/RECORD @@ -0,0 +1,25 @@ +dateutil/__init__.py,sha256=lXElASqwYGwqlrSWSeX19JwF5Be9tNecDa9ebk-0gmk,222 +dateutil/_common.py,sha256=77w0yytkrxlYbSn--lDVPUMabUXRR9I3lBv_vQRUqUY,932 +dateutil/_version.py,sha256=awyHv2PYvDR84dxjrHyzmm8nieFwMjcuuShPh-QNkM4,142 +dateutil/easter.py,sha256=dyBi-lKvimH1u_k6p7Z0JJK72QhqVtVBsqByvpEPKvc,2678 +dateutil/relativedelta.py,sha256=GjVxqpAVWnG67rdbf7pkoIlJvQqmju9NSfGCcqblc7U,24904 +dateutil/rrule.py,sha256=b6GVV4MpZDbBhJ5qitQKRyx8-_OKyeAbk57or2A8AYU,66556 +dateutil/tzwin.py,sha256=7Ar4vdQCnnM0mKR3MUjbIKsZrBVfHgdwsJZc_mGYRew,59 +dateutil/utils.py,sha256=dKCchEw8eObi0loGTx91unBxm_7UGlU3v_FjFMdqwYM,1965 +dateutil/parser/__init__.py,sha256=wWk6GFuxTpjoggCGtgkceJoti4pVjl4_fHQXpNOaSYg,1766 +dateutil/parser/_parser.py,sha256=7klDdyicksQB_Xgl-3UAmBwzCYor1AIZqklIcT6dH_8,58796 +dateutil/parser/isoparser.py,sha256=EtLY7w22HWx-XJpTWxJD3XNs6LBHRCps77tCdLnYad8,13247 +dateutil/tz/__init__.py,sha256=F-Mz13v6jYseklQf9Te9J6nzcLDmq47gORa61K35_FA,444 +dateutil/tz/_common.py,sha256=cgzDTANsOXvEc86cYF77EsliuSab8Puwpsl5-bX3_S4,12977
+dateutil/tz/_factories.py,sha256=unb6XQNXrPMveksTCU-Ag8jmVZs4SojoPUcAHpWnrvU,2569 +dateutil/tz/tz.py,sha256=JotVjDcF16hzoouQ0kZW-5mCYu7Xj67NI-VQgnWapKE,62857 +dateutil/tz/win.py,sha256=xJszWgSwE1xPx_HJj4ZkepyukC_hNy016WMcXhbRaB8,12935 +dateutil/zoneinfo/__init__.py,sha256=KYg0pthCMjcp5MXSEiBJn3nMjZeNZav7rlJw5-tz1S4,5889 +dateutil/zoneinfo/dateutil-zoneinfo.tar.gz,sha256=AkcdBx3XkEZwMSpS_TmOEfrEFHLvgxPNDVIwGVxTVaI,174394 +dateutil/zoneinfo/rebuild.py,sha256=MiqYzCIHvNbMH-LdRYLv-4T0EIA7hDKt5GLR0IRTLdI,2392 +python_dateutil-2.8.2.dist-info/LICENSE,sha256=ugD1Gg2SgjtaHN4n2LW50jIeZ-2NqbwWPv-W1eF-V34,2889 +python_dateutil-2.8.2.dist-info/METADATA,sha256=RDHtGo7BnYRjmYxot_wlu_W3N2CyvPtvchbtyIlKKPA,8218 +python_dateutil-2.8.2.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110 +python_dateutil-2.8.2.dist-info/top_level.txt,sha256=4tjdWkhRZvF7LA_BYe_L9gB2w_p2a-z5y6ArjaRkot8,9 +python_dateutil-2.8.2.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +python_dateutil-2.8.2.dist-info/RECORD,, diff --git a/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/WHEEL b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/WHEEL new file mode 100644 index 0000000000000..01b8fc7d4a10c --- /dev/null +++ b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/top_level.txt b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/top_level.txt new file mode 100644 index 0000000000000..66501480ba5b6 --- /dev/null +++ b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/top_level.txt @@ -0,0 +1 @@ +dateutil diff --git a/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/zip-safe 
b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/zip-safe new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/third_party/python/python_dateutil/python_dateutil-2.8.2.dist-info/zip-safe @@ -0,0 +1 @@ + diff --git a/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/LICENSE b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/LICENSE new file mode 100644 index 0000000000000..82af695f594e8 --- /dev/null +++ b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) Val Neekman @ Neekware Inc. http://neekware.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/METADATA b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/METADATA new file mode 100644 index 0000000000000..e6948ae2f6452 --- /dev/null +++ b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/METADATA @@ -0,0 +1,247 @@ +Metadata-Version: 2.1 +Name: python-slugify +Version: 8.0.1 +Summary: A Python slugify application that also handles Unicode +Home-page: https://github.com/un33k/python-slugify +Author: Val Neekman +Author-email: info@neekware.com +License: MIT +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Natural Language :: English +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +Requires-Dist: text-unidecode (>=1.3) +Provides-Extra: unidecode +Requires-Dist: Unidecode (>=1.1.1) ; extra == 'unidecode' + +# Python Slugify + +**A Python slugify application that handles unicode**. + +[![status-image]][status-link] +[![version-image]][version-link] +[![coverage-image]][coverage-link] + +# Overview + +**Best attempt** to create slugs from unicode strings while keeping it **DRY**. + +# Notice + +This module, by default installs and uses [text-unidecode](https://github.com/kmike/text-unidecode) _(GPL & Perl Artistic)_ for its decoding needs. + +However, there is an alternative decoding package called [Unidecode](https://github.com/avian2/unidecode) _(GPL)_. It can be installed as `python-slugify[unidecode]` for those who prefer it. 
`Unidecode` is believed to be more advanced. + +### `Official` Support Matrix + +| Python | Slugify | +| -------------- | ------------------ | +| `>= 2.7 < 3.6` | `< 5.0.0` | +| `>= 3.6 < 3.7` | `>= 5.0.0 < 7.0.0` | +| `>= 3.7` | `>= 7.0.0` | + +# How to install + + easy_install python-slugify |OR| easy_install python-slugify[unidecode] + -- OR -- + pip install python-slugify |OR| pip install python-slugify[unidecode] + +# Options + +```python +def slugify( + text, + entities=True, + decimal=True, + hexadecimal=True, + max_length=0, + word_boundary=False, + separator='-', + save_order=False, + stopwords=(), + regex_pattern=None, + lowercase=True, + replacements=(), + allow_unicode=False + ): + """ + Make a slug from the given text. + :param text (str): initial text + :param entities (bool): converts html entities to unicode (foo & bar -> foo-bar) + :param decimal (bool): converts html decimal to unicode (Ž -> Ž -> z) + :param hexadecimal (bool): converts html hexadecimal to unicode (Ž -> Ž -> z) + :param max_length (int): output string length + :param word_boundary (bool): truncates to end of full words (length may be shorter than max_length) + :param save_order (bool): if parameter is True and max_length > 0 return whole words in the initial order + :param separator (str): separator between words + :param stopwords (iterable): words to discount + :param regex_pattern (str): regex pattern for disallowed characters + :param lowercase (bool): activate case sensitivity by setting it to False + :param replacements (iterable): list of replacement rules e.g. 
[['|', 'or'], ['%', 'percent']] + :param allow_unicode (bool): allow unicode characters + :return (str): slugify text + """ +``` + +# How to use + +```python +from slugify import slugify + +txt = "This is a test ---" +r = slugify(txt) +self.assertEqual(r, "this-is-a-test") + +txt = '影師嗎' +r = slugify(txt) +self.assertEqual(r, "ying-shi-ma") + +txt = '影師嗎' +r = slugify(txt, allow_unicode=True) +self.assertEqual(r, "影師嗎") + +txt = 'C\'est déjà l\'été.' +r = slugify(txt) +self.assertEqual(r, "c-est-deja-l-ete") + +txt = 'Nín hǎo. Wǒ shì zhōng guó rén' +r = slugify(txt) +self.assertEqual(r, "nin-hao-wo-shi-zhong-guo-ren") + +txt = 'Компьютер' +r = slugify(txt) +self.assertEqual(r, "kompiuter") + +txt = 'jaja---lol-méméméoo--a' +r = slugify(txt, max_length=9) +self.assertEqual(r, "jaja-lol") + +txt = 'jaja---lol-méméméoo--a' +r = slugify(txt, max_length=15, word_boundary=True) +self.assertEqual(r, "jaja-lol-a") + +txt = 'jaja---lol-méméméoo--a' +r = slugify(txt, max_length=20, word_boundary=True, separator=".") +self.assertEqual(r, "jaja.lol.mememeoo.a") + +txt = 'one two three four five' +r = slugify(txt, max_length=13, word_boundary=True, save_order=True) +self.assertEqual(r, "one-two-three") + +txt = 'the quick brown fox jumps over the lazy dog' +r = slugify(txt, stopwords=['the']) +self.assertEqual(r, 'quick-brown-fox-jumps-over-lazy-dog') + +txt = 'the quick brown fox jumps over the lazy dog in a hurry' +r = slugify(txt, stopwords=['the', 'in', 'a', 'hurry']) +self.assertEqual(r, 'quick-brown-fox-jumps-over-lazy-dog') + +txt = 'thIs Has a stopword Stopword' +r = slugify(txt, stopwords=['Stopword'], lowercase=False) +self.assertEqual(r, 'thIs-Has-a-stopword') + +txt = "___This is a test___" +regex_pattern = r'[^-a-z0-9_]+' +r = slugify(txt, regex_pattern=regex_pattern) +self.assertEqual(r, "___this-is-a-test___") + +txt = "___This is a test___" +regex_pattern = r'[^-a-z0-9_]+' +r = slugify(txt, separator='_', regex_pattern=regex_pattern) +self.assertNotEqual(r, 
"_this_is_a_test_") + +txt = '10 | 20 %' +r = slugify(txt, replacements=[['|', 'or'], ['%', 'percent']]) +self.assertEqual(r, "10-or-20-percent") + +txt = 'ÜBER Über German Umlaut' +r = slugify(txt, replacements=[['Ü', 'UE'], ['ü', 'ue']]) +self.assertEqual(r, "ueber-ueber-german-umlaut") + +txt = 'i love 🦄' +r = slugify(txt, allow_unicode=True) +self.assertEqual(r, "i-love") + +txt = 'i love 🦄' +r = slugify(txt, allow_unicode=True, regex_pattern=r'[^🦄]+') +self.assertEqual(r, "🦄") + +``` + +For more examples, have a look at the [test.py](test.py) file. + +# Command Line Options + +With the package, a command line tool called `slugify` is also installed. + +It allows convenient command line access to all the features the `slugify` function supports. Call it with `-h` for help. + +The command can take its input directly on the command line or from STDIN (when the `--stdin` flag is passed): + +``` +$ echo "Taking input from STDIN" | slugify --stdin +taking-input-from-stdin +``` + +``` +$ slugify taking input from the command line +taking-input-from-the-command-line +``` + +Please note that when a multi-valued option such as `--stopwords` or `--replacements` is passed, you need to use `--` as separator before you start with the input: + +``` +$ slugify --stopwords the in a hurry -- the quick brown fox jumps over the lazy dog in a hurry +quick-brown-fox-jumps-over-lazy-dog +``` + +# Running the tests + +To run the tests against the current environment: + + python test.py + +# Contribution + +Please read the ([wiki](https://github.com/un33k/python-slugify/wiki/Python-Slugify-Wiki)) page prior to raising any PRs. + +# License + +Released under a ([MIT](LICENSE)) license. + +### Notes on GPL dependencies +Though the dependencies may be GPL licensed, `python-slugify` itself is not considered a derivative work and will remain under the MIT license. 
+If you wish to avoid installation of any GPL licensed packages, please note that the default dependency `text-unidecode` explicitly lets you choose to use the [Artistic License](https://opensource.org/license/artistic-perl-1-0-2/) instead. Use without concern. + +# Version + +X.Y.Z Version + + `MAJOR` version -- when you make incompatible API changes, + `MINOR` version -- when you add functionality in a backwards-compatible manner, and + `PATCH` version -- when you make backwards-compatible bug fixes. + +[status-image]: https://github.com/un33k/python-slugify/actions/workflows/ci.yml/badge.svg +[status-link]: https://github.com/un33k/python-slugify/actions/workflows/ci.yml +[version-image]: https://img.shields.io/pypi/v/python-slugify.svg +[version-link]: https://pypi.python.org/pypi/python-slugify +[coverage-image]: https://coveralls.io/repos/un33k/python-slugify/badge.svg +[coverage-link]: https://coveralls.io/r/un33k/python-slugify +[download-image]: https://img.shields.io/pypi/dm/python-slugify.svg +[download-link]: https://pypi.python.org/pypi/python-slugify + +# Sponsors + +[Neekware Inc.](http://neekware.com) + + diff --git a/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/RECORD b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/RECORD new file mode 100644 index 0000000000000..6ec16d7e1cd44 --- /dev/null +++ b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/RECORD @@ -0,0 +1,11 @@ +slugify/__init__.py,sha256=Q-9bKCQv89uf3bJr_yHxMPhBWXN8YCzlxQwK_kdpefI,346 +slugify/__main__.py,sha256=3EVQris1UpnWMgvjeVLDvzRXGBqkNkdpzFPmez5syuU,3866 +slugify/__version__.py,sha256=EzSzGa2hG-1z11YrS38w8w2tmCoQqEHO46xcvQsiFgI,325 +slugify/slugify.py,sha256=v8rRfSR2I4QiRNoG0FpL0TabbKUelZmtYnQjHfwvp6I,5795 +slugify/special.py,sha256=uV3YMYay1HTaP3nvyzaiV4FqGazjj8HmDHM1fsPQ3oo,1167 +python_slugify-8.0.1.dist-info/LICENSE,sha256=MLpNxpqfTc4TLdcDk3x6k7Vz4lJGBNLV-SxQZlFMDU8,1103 
+python_slugify-8.0.1.dist-info/METADATA,sha256=LVPaRoPcTNzPsamnpcpcxsOcyiCCoGApIPuT_memhFE,8176 +python_slugify-8.0.1.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110 +python_slugify-8.0.1.dist-info/entry_points.txt,sha256=vd1gzjXoYZ16TfgZThH2nhVFwhAsWCecqUMGyHijAP8,51 +python_slugify-8.0.1.dist-info/top_level.txt,sha256=D7zuR7zxISqlCxArlOOOuLsWObz1_3jgosq5XhlSpew,8 +python_slugify-8.0.1.dist-info/RECORD,, diff --git a/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/WHEEL b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/WHEEL new file mode 100644 index 0000000000000..01b8fc7d4a10c --- /dev/null +++ b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/entry_points.txt b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000..0ef496e6f9065 --- /dev/null +++ b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +slugify = slugify.__main__:main + diff --git a/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/top_level.txt b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/top_level.txt new file mode 100644 index 0000000000000..f4843f722bf1f --- /dev/null +++ b/third_party/python/python_slugify/python_slugify-8.0.1.dist-info/top_level.txt @@ -0,0 +1 @@ +slugify diff --git a/third_party/python/python_slugify/slugify/__init__.py b/third_party/python/python_slugify/slugify/__init__.py new file mode 100644 index 0000000000000..6d3279fb1ac2e --- /dev/null +++ b/third_party/python/python_slugify/slugify/__init__.py @@ -0,0 +1,10 @@ +from .special import * +from .slugify import * +from .__version__ import __title__ +from .__version__ 
import __author__ +from .__version__ import __author_email__ +from .__version__ import __description__ +from .__version__ import __url__ +from .__version__ import __license__ +from .__version__ import __copyright__ +from .__version__ import __version__ diff --git a/third_party/python/python_slugify/slugify/__main__.py b/third_party/python/python_slugify/slugify/__main__.py new file mode 100644 index 0000000000000..7dd6b01a5edfd --- /dev/null +++ b/third_party/python/python_slugify/slugify/__main__.py @@ -0,0 +1,96 @@ +from __future__ import print_function, absolute_import +import argparse +import sys + +from .slugify import slugify, DEFAULT_SEPARATOR + + +def parse_args(argv): + parser = argparse.ArgumentParser(description="Slug string") + + input_group = parser.add_argument_group(description="Input") + input_group.add_argument("input_string", nargs='*', + help='Text to slugify') + input_group.add_argument("--stdin", action='store_true', + help="Take the text from STDIN") + + parser.add_argument("--no-entities", action='store_false', dest='entities', default=True, + help="Do not convert HTML entities to unicode") + parser.add_argument("--no-decimal", action='store_false', dest='decimal', default=True, + help="Do not convert HTML decimal to unicode") + parser.add_argument("--no-hexadecimal", action='store_false', dest='hexadecimal', default=True, + help="Do not convert HTML hexadecimal to unicode") + parser.add_argument("--max-length", type=int, default=0, + help="Output string length, 0 for no limit") + parser.add_argument("--word-boundary", action='store_true', default=False, + help="Truncate to complete word even if length ends up shorter than --max_length") + parser.add_argument("--save-order", action='store_true', default=False, + help="When set and --max_length > 0 return whole words in the initial order") + parser.add_argument("--separator", type=str, default=DEFAULT_SEPARATOR, + help="Separator between words. 
By default " + DEFAULT_SEPARATOR) + parser.add_argument("--stopwords", nargs='+', + help="Words to discount") + parser.add_argument("--regex-pattern", + help="Python regex pattern for disallowed characters") + parser.add_argument("--no-lowercase", action='store_false', dest='lowercase', default=True, + help="Activate case sensitivity") + parser.add_argument("--replacements", nargs='+', + help="""Additional replacement rules e.g. "|->or", "%%->percent".""") + parser.add_argument("--allow-unicode", action='store_true', default=False, + help="Allow unicode characters") + + args = parser.parse_args(argv[1:]) + + if args.input_string and args.stdin: + parser.error("Input strings and --stdin cannot work together") + + if args.replacements: + def split_check(repl): + SEP = '->' + if SEP not in repl: + parser.error("Replacements must be of the form: ORIGINAL{SEP}REPLACED".format(SEP=SEP)) + return repl.split(SEP, 1) + args.replacements = [split_check(repl) for repl in args.replacements] + + if args.input_string: + args.input_string = " ".join(args.input_string) + elif args.stdin: + args.input_string = sys.stdin.read() + + if not args.input_string: + args.input_string = '' + + return args + + +def slugify_params(args): + return dict( + text=args.input_string, + entities=args.entities, + decimal=args.decimal, + hexadecimal=args.hexadecimal, + max_length=args.max_length, + word_boundary=args.word_boundary, + save_order=args.save_order, + separator=args.separator, + stopwords=args.stopwords, + lowercase=args.lowercase, + replacements=args.replacements, + allow_unicode=args.allow_unicode + ) + + +def main(argv=None): # pragma: no cover + """ Run this program """ + if argv is None: + argv = sys.argv + args = parse_args(argv) + params = slugify_params(args) + try: + print(slugify(**params)) + except KeyboardInterrupt: + sys.exit(-1) + + +if __name__ == '__main__': # pragma: no cover + main() diff --git a/third_party/python/python_slugify/slugify/__version__.py 
b/third_party/python/python_slugify/slugify/__version__.py new file mode 100644 index 0000000000000..a558d9bce43fd --- /dev/null +++ b/third_party/python/python_slugify/slugify/__version__.py @@ -0,0 +1,8 @@ +__title__ = 'python-slugify' +__author__ = 'Val Neekman' +__author_email__ = 'info@neekware.com' +__description__ = 'A Python slugify application that also handles Unicode' +__url__ = 'https://github.com/un33k/python-slugify' +__license__ = 'MIT' +__copyright__ = 'Copyright 2022 Val Neekman @ Neekware Inc.' +__version__ = '8.0.1' diff --git a/third_party/python/python_slugify/slugify/slugify.py b/third_party/python/python_slugify/slugify/slugify.py new file mode 100644 index 0000000000000..5354fa5e447f6 --- /dev/null +++ b/third_party/python/python_slugify/slugify/slugify.py @@ -0,0 +1,177 @@ +import re +import sys +import unicodedata +from html.entities import name2codepoint + +try: + import unidecode +except ImportError: + import text_unidecode as unidecode + +__all__ = ['slugify', 'smart_truncate'] + + +CHAR_ENTITY_PATTERN = re.compile(r'&(%s);' % '|'.join(name2codepoint)) +DECIMAL_PATTERN = re.compile(r'&#(\d+);') +HEX_PATTERN = re.compile(r'&#x([\da-fA-F]+);') +QUOTE_PATTERN = re.compile(r'[\']+') +DISALLOWED_CHARS_PATTERN = re.compile(r'[^-a-zA-Z0-9]+') +DISALLOWED_UNICODE_CHARS_PATTERN = re.compile(r'[\W_]+') +DUPLICATE_DASH_PATTERN = re.compile(r'-{2,}') +NUMBERS_PATTERN = re.compile(r'(?<=\d),(?=\d)') +DEFAULT_SEPARATOR = '-' + + +def smart_truncate(string, max_length=0, word_boundary=False, separator=' ', save_order=False): + """ + Truncate a string. 
+ :param string (str): string for modification + :param max_length (int): output string length + :param word_boundary (bool): + :param save_order (bool): if True then word order of output string is like input string + :param separator (str): separator between words + :return: + """ + + string = string.strip(separator) + + if not max_length: + return string + + if len(string) < max_length: + return string + + if not word_boundary: + return string[:max_length].strip(separator) + + if separator not in string: + return string[:max_length] + + truncated = '' + for word in string.split(separator): + if word: + next_len = len(truncated) + len(word) + if next_len < max_length: + truncated += '{}{}'.format(word, separator) + elif next_len == max_length: + truncated += '{}'.format(word) + break + else: + if save_order: + break + if not truncated: # pragma: no cover + truncated = string[:max_length] + return truncated.strip(separator) + + +def slugify(text, entities=True, decimal=True, hexadecimal=True, max_length=0, word_boundary=False, + separator=DEFAULT_SEPARATOR, save_order=False, stopwords=(), regex_pattern=None, lowercase=True, + replacements=(), allow_unicode=False): + """ + Make a slug from the given text. 
+ :param text (str): initial text + :param entities (bool): converts html entities to unicode + :param decimal (bool): converts html decimal to unicode + :param hexadecimal (bool): converts html hexadecimal to unicode + :param max_length (int): output string length + :param word_boundary (bool): truncates to complete word even if length ends up shorter than max_length + :param save_order (bool): if parameter is True and max_length > 0 return whole words in the initial order + :param separator (str): separator between words + :param stopwords (iterable): words to discount + :param regex_pattern (str): regex pattern for disallowed characters + :param lowercase (bool): activate case sensitivity by setting it to False + :param replacements (iterable): list of replacement rules e.g. [['|', 'or'], ['%', 'percent']] + :param allow_unicode (bool): allow unicode characters + :return (str): + """ + + # user-specific replacements + if replacements: + for old, new in replacements: + text = text.replace(old, new) + + # ensure text is unicode + if not isinstance(text, str): + text = str(text, 'utf-8', 'ignore') + + # replace quotes with dashes - pre-process + text = QUOTE_PATTERN.sub(DEFAULT_SEPARATOR, text) + + # decode unicode + if not allow_unicode: + text = unidecode.unidecode(text) + + # ensure text is still in unicode + if not isinstance(text, str): + text = str(text, 'utf-8', 'ignore') + + # character entity reference + if entities: + text = CHAR_ENTITY_PATTERN.sub(lambda m: chr(name2codepoint[m.group(1)]), text) + + # decimal character reference + if decimal: + try: + text = DECIMAL_PATTERN.sub(lambda m: chr(int(m.group(1))), text) + except Exception: + pass + + # hexadecimal character reference + if hexadecimal: + try: + text = HEX_PATTERN.sub(lambda m: chr(int(m.group(1), 16)), text) + except Exception: + pass + + # translate + if allow_unicode: + text = unicodedata.normalize('NFKC', text) + else: + text = unicodedata.normalize('NFKD', text) + + if sys.version_info < 
(3,): + text = text.encode('ascii', 'ignore') + + # make the text lowercase (optional) + if lowercase: + text = text.lower() + + # remove generated quotes -- post-process + text = QUOTE_PATTERN.sub('', text) + + # cleanup numbers + text = NUMBERS_PATTERN.sub('', text) + + # replace all other unwanted characters + if allow_unicode: + pattern = regex_pattern or DISALLOWED_UNICODE_CHARS_PATTERN + else: + pattern = regex_pattern or DISALLOWED_CHARS_PATTERN + + text = re.sub(pattern, DEFAULT_SEPARATOR, text) + + # remove redundant + text = DUPLICATE_DASH_PATTERN.sub(DEFAULT_SEPARATOR, text).strip(DEFAULT_SEPARATOR) + + # remove stopwords + if stopwords: + if lowercase: + stopwords_lower = [s.lower() for s in stopwords] + words = [w for w in text.split(DEFAULT_SEPARATOR) if w not in stopwords_lower] + else: + words = [w for w in text.split(DEFAULT_SEPARATOR) if w not in stopwords] + text = DEFAULT_SEPARATOR.join(words) + + # finalize user-specific replacements + if replacements: + for old, new in replacements: + text = text.replace(old, new) + + # smart truncate if requested + if max_length > 0: + text = smart_truncate(text, max_length, word_boundary, DEFAULT_SEPARATOR, save_order) + + if separator != DEFAULT_SEPARATOR: + text = text.replace(DEFAULT_SEPARATOR, separator) + + return text diff --git a/third_party/python/python_slugify/slugify/special.py b/third_party/python/python_slugify/slugify/special.py new file mode 100644 index 0000000000000..54eb85c70e9ff --- /dev/null +++ b/third_party/python/python_slugify/slugify/special.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- + + +def add_uppercase_char(char_list): + """ Given a replacement char list, this adds uppercase chars to the list """ + + for item in char_list: + char, xlate = item + upper_dict = char.upper(), xlate.capitalize() + if upper_dict not in char_list and char != upper_dict[0]: + char_list.insert(0, upper_dict) + return char_list + + +# Language specific pre translations +# Source awesome-slugify + 
+_CYRILLIC = [ # package defaults: + (u'ё', u'e'), # io / yo + (u'я', u'ya'), # ia + (u'х', u'h'), # kh + (u'у', u'y'), # u + (u'щ', u'sch'), # sch + (u'ю', u'u'), # iu / yu +] +CYRILLIC = add_uppercase_char(_CYRILLIC) + +_GERMAN = [ # package defaults: + (u'ä', u'ae'), # a + (u'ö', u'oe'), # o + (u'ü', u'ue'), # u +] +GERMAN = add_uppercase_char(_GERMAN) + +_GREEK = [ # package defaults: + (u'χ', u'ch'), # kh + (u'Ξ', u'X'), # Ks + (u'ϒ', u'Y'), # U + (u'υ', u'y'), # u + (u'ύ', u'y'), + (u'ϋ', u'y'), + (u'ΰ', u'y'), +] +GREEK = add_uppercase_char(_GREEK) + +# Pre translations +PRE_TRANSLATIONS = CYRILLIC + GERMAN + GREEK diff --git a/third_party/python/requirements.in b/third_party/python/requirements.in index 9fcb3bfc924b0..7202bbe0a5384 100644 --- a/third_party/python/requirements.in +++ b/third_party/python/requirements.in @@ -42,7 +42,7 @@ setuptools==51.2.0 six==1.13.0 slugid==2.0.0 taskcluster==44.2.2 -taskcluster-taskgraph==3.5.2 +taskcluster-taskgraph==5.6.1 taskcluster-urls==13.0.1 toml==0.10.2 tqdm==4.62.3 diff --git a/third_party/python/requirements.txt b/third_party/python/requirements.txt index dfb5dcd90347d..c6e7f9a39dc2e 100644 --- a/third_party/python/requirements.txt +++ b/third_party/python/requirements.txt @@ -42,12 +42,18 @@ ansicon==1.89.0 ; python_version >= "3.7" and python_version < "4.0" and platfor appdirs==1.4.4 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41 \ --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128 +arrow==1.2.3 ; python_version >= "3.7" and python_version < "4.0" \ + --hash=sha256:3934b30ca1b9f292376d9db15b19446088d12ec58629bc3f0da28fd55fb633a1 \ + --hash=sha256:5a49ab92e3b7b71d96cd6bfcc4df14efefc9dfa96ea19045815914a6ab6b1fe2 async-timeout==3.0.1 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \ 
--hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3 attrs==23.1.0 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 +binaryornot==0.4.4 ; python_version >= "3.7" and python_version < "4.0" \ + --hash=sha256:359501dfc9d40632edc9fac890e19542db1a287bbcfa58175b66658392018061 \ + --hash=sha256:b8b71173c917bddcd2c16070412e369c3ed7f0528926f70cac18a6c97fd563e4 blessed==1.19.1 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:63b8554ae2e0e7f43749b6715c734cc8f3883010a809bf16790102563e6cf25b \ --hash=sha256:9a0d099695bf621d4680dd6c73f6ad547f6a3442fbdbe80c4b1daa1edbc492fc @@ -69,6 +75,9 @@ colorama==0.4.5 ; python_version >= "3.7" and python_version < "4.0" \ compare-locales==9.0.1 ; python_version >= "3.7" and python_version < "4" \ --hash=sha256:2de0f1d382749fffa6a482d462daff0d70bbc99d48520a0bf8459b22dc7fe9da \ --hash=sha256:eda953796841cbfab508ee35f7613a38ae7fbeed48bd26bf5cda9063bd638f06 +cookiecutter==2.1.1 ; python_version >= "3.7" and python_version < "4.0" \ + --hash=sha256:9f3ab027cec4f70916e28f03470bdb41e637a3ad354b4d65c765d93aad160022 \ + --hash=sha256:f3982be8d9c53dac1261864013fdec7f83afd2e42ede6f6dd069c5e149c540d5 cookies==2.2.1 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:15bee753002dff684987b8df8c235288eb8d45f8191ae056254812dfd42c81d3 \ --hash=sha256:d6b698788cae4cfa4e62ef8643a9ca332b79bd96cb314294b864ae8d7eb3ee8e @@ -107,6 +116,9 @@ importlib-metadata==6.0.0 ; python_version >= "3.7" and python_version < "4.0" \ importlib-resources==5.12.0 ; python_version >= "3.7" and python_version < "3.9" \ --hash=sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6 \ --hash=sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a +jinja2-time==0.2.0 ; python_version >= "3.7" and python_version < "4.0" \ + 
--hash=sha256:d14eaa4d315e7688daa4969f616f226614350c48730bfa1692d2caebd8c90d40 \ + --hash=sha256:d3eab6605e3ec8b7a0863df09cc1d23714908fa61aa6986a845c20ba488b4efa jinja2==2.11.3 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419 \ --hash=sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6 @@ -270,8 +282,14 @@ pyparsing==2.4.7 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b pyrsistent==0.16.0 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:28669905fe725965daa16184933676547c5bb40a5153055a8dee2a4bd7933ad3 +python-dateutil==2.8.2 ; python_version >= "3.7" and python_version < "4.0" \ + --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ + --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 python-hglib==2.4 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:693d6ed92a6566e78802c7a03c256cda33d08c63ad3f00fcfa11379b184b9462 +python-slugify==8.0.1 ; python_version >= "3.7" and python_version < "4.0" \ + --hash=sha256:70ca6ea68fe63ecc8fa4fcf00ae651fc8a5d02d93dcd12ae6d4fc7ca46c4d395 \ + --hash=sha256:ce0d46ddb668b3be82f4ed5e503dbc33dd815d83e2eb6824211310d3fb172a27 pyyaml==6.0.1 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \ --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \ @@ -339,9 +357,9 @@ six==1.13.0 ; python_version >= "3.7" and python_version < "4.0" \ slugid==2.0.0 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:a950d98b72691178bdd4d6c52743c4a2aa039207cf7a97d71060a111ff9ba297 \ --hash=sha256:aec8b0e01c4ad32e38e12d609eab3ec912fd129aaf6b2ded0199b56a5f8fd67c -taskcluster-taskgraph==3.5.2 ; python_version >= "3.7" and python_version < "4.0" \ - 
--hash=sha256:62f1a320d6b310f65151904a9992719a9b2c4c41ef8f57be810899fd3c5d2703 \ - --hash=sha256:6a024ba2383f56e11b764500f92837afb825612a49d24bde9791dfa7aa7ddaec +taskcluster-taskgraph==5.6.1 ; python_version >= "3.7" and python_version < "4.0" \ + --hash=sha256:15c5455eefe2b91155e694452199f9cc4753dff5a74a95cf602e334f9621a8e6 \ + --hash=sha256:db14109f1edcbe03c96d2de81eb84e2fed5d218d86a5d026947eeb1a6cfe5a28 taskcluster-urls==13.0.1 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:5e25e7e6818e8877178b175ff43d2e6548afad72694aa125f404a7329ece0973 \ --hash=sha256:b25e122ecec249c4299ac7b20b08db76e3e2025bdaeb699a9d444556de5fd367 \ @@ -350,6 +368,9 @@ taskcluster==44.2.2 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:0266a6a901e1a2ec838984a7f24e7adb6d58f9f2e221a7f613388f8f23f786fc \ --hash=sha256:846d73c597f0f47dd8525c85c8d9bc41111d5200b090690d3f16b2f57c56a2e1 \ --hash=sha256:c1b0e82be25b1ed17e07c90b24a382634b2bfce273fdf2682d94568abe10716c +text-unidecode==1.3 ; python_version >= "3.7" and python_version < "4.0" \ + --hash=sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8 \ + --hash=sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93 toml==0.10.2 ; python_version >= "3.7" and python_version < "4.0" \ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/RECORD b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/RECORD deleted file mode 100644 index 8af8cb00e6fc3..0000000000000 --- a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/RECORD +++ /dev/null @@ -1,74 +0,0 @@ -taskgraph/__init__.py,sha256=jwOtU7TkmU317LP_IsgIswpj2T1OPUXXgMRv4sIU7nE,707 -taskgraph/config.py,sha256=MoFLjKPUViWYGALi_acWDVXZs7M8cy0zQpUKsJSlBMs,4411 
-taskgraph/create.py,sha256=1z2AyLvHMkZfDkmPy6um86HG9xTRhE0Sphnbpd-kuEg,5190 -taskgraph/decision.py,sha256=ApfQeXumRH7uq55DLt7gjQCh_eKls6lPhnNaH2ZpR-0,12849 -taskgraph/docker.py,sha256=dB282jKjfLnHwL73YSg1Eeqj-ojHQc676vEpWt4PjVw,7835 -taskgraph/files_changed.py,sha256=W3_gEgUT-mVH9DaaU_8X6gYpftrqBU3kgveGbzPLziU,2793 -taskgraph/filter_tasks.py,sha256=R7tYXiaVPGIkQ6O1c9-QJrKZ59m9pFXCloUlPraVnZU,866 -taskgraph/generator.py,sha256=tonQ3UvaZYRdpWOtmdQ5Mr4en1FRCUJvbvlbzfChluM,15590 -taskgraph/graph.py,sha256=9tE3bSSBRHvRLgJzK4dTieGT3RrzQZdR1YbKizEhzlw,4667 -taskgraph/main.py,sha256=rb7cwghT5U97kSpIho0KzXo4HSXp2Iw_jaL2A2Qrf18,23581 -taskgraph/morph.py,sha256=8qxYdruEQkbHGqv7dh3e1OWhH9Y5i6bFUKzDMs-Ctnw,9625 -taskgraph/parameters.py,sha256=4JWaL_otzQaQjmXc7-HnjfhlHYSaltYRb_6xeUNbERY,11906 -taskgraph/target_tasks.py,sha256=41BIVwiATy8DCQujPduTtnFmgHlKOfw6RPGL4b20WO8,3324 -taskgraph/task.py,sha256=QCrOzMaTsy5QHShKUo89XgjJVMl3cSZGZJPLuHCXItE,3132 -taskgraph/taskgraph.py,sha256=tfj0ZMqjuwEQDET0W57EcP-_KBEbqkxJci9Z6DkeOEQ,2397 -taskgraph/actions/__init__.py,sha256=lVP1e0YyELg7-_42MWWDbT0cKv_p53BApVE6vWOiPww,416 -taskgraph/actions/add_new_jobs.py,sha256=mX_DFDJaQUHetjyMNi5b8zPCCeqfzDrCjDg5DxTaA-I,1831 -taskgraph/actions/cancel.py,sha256=UQSt_6y3S6PXNmUo_mNaUOuDvK2bixWjzdjTKXieEEg,1309 -taskgraph/actions/cancel_all.py,sha256=-ETWKl8BHkk5HjGZRIJpUsFOySE6co0pL0dBDupolu8,1947 -taskgraph/actions/registry.py,sha256=xmhoEGMyYj6TTRFwMowZAUp0aqvtLvdVfmRWM7Yh7xo,13122 -taskgraph/actions/retrigger.py,sha256=awSC8XRtPJxADz5tbEWTKdNEudG8SpwUOM7z2lXxH1U,9382 -taskgraph/actions/util.py,sha256=jA5xXehV8N2G542LZOEci_gMHEFN-BrIjkA55On0kc0,10673 -taskgraph/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -taskgraph/loader/transform.py,sha256=olUBPjxk3eEIg25sduxlcyqhjoig4ts5kPlT_zs6g9g,2147 -taskgraph/optimize/__init__.py,sha256=Oqpq1RW8QzOcu7zaMlNQ3BHT9ws9e_93FWfCqzNcQps,123 -taskgraph/optimize/base.py,sha256=WvoDNewyHG46IQbG3th-aau9OxSKegsYNfvdOEmunbA,18341 
-taskgraph/optimize/strategies.py,sha256=Y5fS-f_3xsQNfFjCXIwDxrwXBvyp4yZxdPVNh49c7XU,2381 -taskgraph/run-task/fetch-content,sha256=z3kx-vxaaaAmfqW-JW7dPKIFpjnxdZiXMdpPj1jAG8M,29915 -taskgraph/run-task/hgrc,sha256=BybWLDR89bWi3pE5T05UqmDHs02CbLypE-omLZWU6Uk,896 -taskgraph/run-task/robustcheckout.py,sha256=tZi_FRGFhX27fspaUj2RGsMCmkwn8IfpRiSsPOrGfXQ,29802 -taskgraph/run-task/run-task,sha256=zT83gWFaB0qBWdxCLxOVHiMdq1bmSmi90FjXjcegfpk,43584 -taskgraph/transforms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -taskgraph/transforms/base.py,sha256=N9ec4kw65V_J2KY4C4QRPlbIREbRDYwTlhClstYmOBU,5285 -taskgraph/transforms/cached_tasks.py,sha256=Z10VD1kEBVXJvj8qSsNTq2mYpklh0V1EN8OT6QK3v_E,2607 -taskgraph/transforms/code_review.py,sha256=eE2xrDtdD_n3HT3caQ2HGAkPm6Uutdm4hDCpCoFjEps,707 -taskgraph/transforms/docker_image.py,sha256=ADiOUB-Ngm9Y6uwzGDpQsDJ_-4w6-ZYwLCxQ-0b16E0,7567 -taskgraph/transforms/fetch.py,sha256=Q7Co4wdBKL6Tr3Uc-eitJ3NGgGUYmRXNLuC5m-59-M8,10443 -taskgraph/transforms/release_notifications.py,sha256=jrb9CCT-z_etDf690T-AeCvdzIoVWBAeM_FGoW7FIzA,3305 -taskgraph/transforms/task.py,sha256=fBiSCyC0Lzd2GDSZ_QwhQ1RRebXLmkw4ZCPte9fwEL8,48212 -taskgraph/transforms/job/__init__.py,sha256=ayAytoDmlmNvJNArJc-_nBz1Xuc191rZdbobUgp9hQA,17192 -taskgraph/transforms/job/common.py,sha256=XtKSxUCwRYqpPgRTyLD_8JGRuJs2JYuR0RXpTarPdTE,6826 -taskgraph/transforms/job/index_search.py,sha256=Ngh9FFu1bx2kHVTChW2vcrbnb3SzMneRHopXk18RfB4,1220 -taskgraph/transforms/job/run_task.py,sha256=z5DqgHmmHYEbKtnpMQqcMY6ksgCnnoB7CugH3Z41Gag,8610 -taskgraph/transforms/job/toolchain.py,sha256=WWsj6L_db9rJxzo26TdEf_0jcrK4MCoHHJDzFBkSFpI,5978 -taskgraph/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -taskgraph/util/archive.py,sha256=nzYn8cQ3NfLAeV-2SuTNoeQ6hg8m40f6FQcSTyVIKwQ,2855 -taskgraph/util/attributes.py,sha256=zSaRws02rdF1TgvOoFzVNLg2XmwbtVVCTsp4M_qm3RI,2617 -taskgraph/util/cached_tasks.py,sha256=o-yJ91wlWbzoDB2GvKPpGcDE27_IEMgczp_figEBjV8,3406 
-taskgraph/util/decision.py,sha256=uTC143FpTKQkGff5jIz3voWRYXBCHgx-XAm7FMW53hE,2433 -taskgraph/util/docker.py,sha256=vdTruZT2Z_GVcyAYilaHt8VaRj4b-dtBKVWlq_GwYvE,11699 -taskgraph/util/hash.py,sha256=71R979-mlDnwTXC5GXrOWTS5VpW4DFWWK9S8Urm_Uic,1560 -taskgraph/util/keyed_by.py,sha256=cgBH4tG8eH5UUrm5q4ODG7A4fzkGAOI7feVoZy3V8Ho,3419 -taskgraph/util/memoize.py,sha256=XDlwc-56gzoY8QTwOoiCOYL-igX7JoMcY-9Ih80Euc8,1331 -taskgraph/util/parameterization.py,sha256=dzxh8Bc8MBKoDMwj2V2AQab9UrC-JcM3tg0hDVTWpjc,3184 -taskgraph/util/path.py,sha256=GOWPdvC144PVy8rsLda8SPenofwSnBaD0L5aJdDNtao,4688 -taskgraph/util/python_path.py,sha256=ed4F5z2mId56LauVczgxm_LGxgQi8XlxlYDgXOPZyII,1576 -taskgraph/util/readonlydict.py,sha256=XzTG-gqGqWVlSkDxSyOL6Ur7Z0ONhIJ9DVLWV3q4q1w,787 -taskgraph/util/schema.py,sha256=JGd0Imjfv6JKCY_tjJtOYwI6uwKUaNgzAcvcZj5WE6A,8323 -taskgraph/util/shell.py,sha256=MB9zHVSvxgOuszgmKr2rWUDahANZkbHHNkjjagZG_3I,1317 -taskgraph/util/taskcluster.py,sha256=cGUGvkrefRHngjyZm_iQRYKRlGi4jMIr7ky0fi_YBrg,12445 -taskgraph/util/taskgraph.py,sha256=ecKEvTfmLVvEKLPO_0g34CqVvc0iCzuNMh3064BZNrE,1969 -taskgraph/util/templates.py,sha256=Dqxfl244u-PX7dnsk3_vYyzDwpDgJtANK6NmZwN3Qow,1417 -taskgraph/util/time.py,sha256=pNFcTH-iYRfm2-okm1lMATc4B5wO-_FXbOFXEtXD27g,3390 -taskgraph/util/treeherder.py,sha256=XrdE-Je0ZvXe6_8f0DvvqNbrHherUk-hUuxirImPEIo,2138 -taskgraph/util/vcs.py,sha256=i13idS8y9ooR216mnd1gksdjSgHBNlAZEdq7Xr-ROwE,18536 -taskgraph/util/verify.py,sha256=YETuZVkwnfYe57GRPx2x_vedstgqdGiH46HLWAdcks8,8827 -taskgraph/util/workertypes.py,sha256=5g2mgIbEKMzDpZNnmPMoMNyy7Wahi-jmWcV1amDAcPo,2341 -taskgraph/util/yaml.py,sha256=hfKI_D8Q7dimq4_VvO3WEh8CJsTrsIMwN6set7HIQbY,990 -taskcluster_taskgraph-3.5.2.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725 -taskcluster_taskgraph-3.5.2.dist-info/METADATA,sha256=8vZXhtvvL0WcQK5Sp9vslS9bdJHFN0LWZG0YzEUZips,1126 -taskcluster_taskgraph-3.5.2.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 
-taskcluster_taskgraph-3.5.2.dist-info/entry_points.txt,sha256=VoXNtZpN4LvyXYB1wq47AU9CO-DMYMJ0VktKxjugzbY,51 -taskcluster_taskgraph-3.5.2.dist-info/top_level.txt,sha256=3JNeYn_hNiNXC7DrdH_vcv-WYSE7QdgGjdvUYvSjVp0,10 -taskcluster_taskgraph-3.5.2.dist-info/RECORD,, diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/LICENSE b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/LICENSE similarity index 100% rename from third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/LICENSE rename to third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/LICENSE diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/METADATA b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/METADATA similarity index 87% rename from third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/METADATA rename to third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/METADATA index 79aded7e6c86f..bb2e292d7d5a2 100644 --- a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/METADATA +++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/METADATA @@ -1,22 +1,20 @@ Metadata-Version: 2.1 Name: taskcluster-taskgraph -Version: 3.5.2 +Version: 5.6.1 Summary: Build taskcluster taskgraphs Home-page: https://github.com/taskcluster/taskgraph -License: UNKNOWN -Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: Console Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) -Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: 
Python :: 3.11 Classifier: Topic :: Software Development License-File: LICENSE Requires-Dist: appdirs (>=1.4) -Requires-Dist: attrs (>=19.1.0) +Requires-Dist: cookiecutter (~=2.1) Requires-Dist: json-e (>=2.7) Requires-Dist: mozilla-repo-urls Requires-Dist: PyYAML (>=5.4) @@ -29,5 +27,3 @@ Requires-Dist: voluptuous (>=0.12.1) Provides-Extra: load-image Requires-Dist: zstandard ; extra == 'load-image' -UNKNOWN - diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/RECORD b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/RECORD new file mode 100644 index 0000000000000..b6d7d825750b0 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/RECORD @@ -0,0 +1,105 @@ +taskgraph/__init__.py,sha256=l9JA5tNwrC6CWMVcScSKUzsCYfIrFNBcUkwMCLUJ8UM,729 +taskgraph/config.py,sha256=XJYKaA9Egn7aiyZ0v70VCq3Kc-XkK08CK2LDsDfsDR8,4822 +taskgraph/create.py,sha256=MeWVr5gKJefjwK_3_xZUcDDu2NVH97gbUuu1dw_I9hA,5184 +taskgraph/decision.py,sha256=qARBTlLYJ7NVw3aflrspRn_hFmvKcrXJ058yao_4b7A,12882 +taskgraph/docker.py,sha256=UtUfv3F7YBmrI7tJ1XODG_VvfwG0oWpNlsv59Bst728,7834 +taskgraph/files_changed.py,sha256=W3_gEgUT-mVH9DaaU_8X6gYpftrqBU3kgveGbzPLziU,2793 +taskgraph/filter_tasks.py,sha256=R7tYXiaVPGIkQ6O1c9-QJrKZ59m9pFXCloUlPraVnZU,866 +taskgraph/generator.py,sha256=I9MqXexMjM3v1HymuueHRmuost18jAQzFIgXxf3ejBU,15541 +taskgraph/graph.py,sha256=bHUsv2pPa2SSaWgBY-ItIj7REPd0o4fFYrwoQbwFKTY,4680 +taskgraph/main.py,sha256=-BC0J4PhLL-6nvzHgk2YGHfPH8yfjYVbgeKBKiJ25QQ,26201 +taskgraph/morph.py,sha256=Q6weAi-xpJM4XoKA2mM6gVXQYLnE1YSws53vTZygMkY,9192 +taskgraph/optimize.py,sha256=NVshvkqRKr7SQvRdqz5CELmnIXeiODkDxlK0D9QMi9k,16487 +taskgraph/parameters.py,sha256=4JWaL_otzQaQjmXc7-HnjfhlHYSaltYRb_6xeUNbERY,11906 +taskgraph/target_tasks.py,sha256=41BIVwiATy8DCQujPduTtnFmgHlKOfw6RPGL4b20WO8,3324 +taskgraph/task.py,sha256=tRr7WhJ2qjYXi-77wva17CpfK53m6W_cl-xzks_GGaQ,3240 
+taskgraph/taskgraph.py,sha256=Fh5cX8LrgYmkpVP_uhpfRgHSKHfZjO-VGSmnFUjEru0,2434 +taskgraph/__pycache__/__init__.cpython-38.pyc,sha256=xV5GSUfAPrHX_KG1vugCJd-NZtAUqVTnEgfSTH57FBY,210 +taskgraph/__pycache__/config.cpython-38.pyc,sha256=clo1E3CxYuY2qfzjYunQWn1P7wHMZGmwcqzy9VzvUAc,3604 +taskgraph/__pycache__/filter_tasks.cpython-38.pyc,sha256=08jfJtCQQdVHeWWanb9OPSjgoAD2NCVNZML8q8pjV8o,952 +taskgraph/__pycache__/generator.cpython-38.pyc,sha256=LeHhZUcEWVzG3gx1NzyS9FR4cuDh6-Ad2qEtEMbYQ7Q,12505 +taskgraph/__pycache__/graph.cpython-38.pyc,sha256=13V7ghyw2KEC-8EiQkHgNu4d_9a8-AVgWGkYSJF3NnE,4871 +taskgraph/__pycache__/morph.cpython-38.pyc,sha256=o-Wzm3LVaNEfkksdsJIAwXjfFkHFuQki5dOSQqIURBU,6195 +taskgraph/__pycache__/parameters.cpython-38.pyc,sha256=_n5z1XmsHhsux6wg_O8eJBvvSjNk-GFW50JFMbI-cnU,9849 +taskgraph/__pycache__/target_tasks.cpython-38.pyc,sha256=aMxKCQ_25V0ccWgEJdvI4xzt3EkKeti8E5dU1R8X4kc,3450 +taskgraph/__pycache__/task.cpython-38.pyc,sha256=QI21Gzh1bQ8NfAwXyVhmZAwxAiS6eN6nMR11_Z8EebY,2768 +taskgraph/__pycache__/taskgraph.cpython-38.pyc,sha256=28UCokO5v9-tld0hSNxQ2Tv_0j7OWYszoHpudMOih6w,2565 +taskgraph/actions/__init__.py,sha256=lVP1e0YyELg7-_42MWWDbT0cKv_p53BApVE6vWOiPww,416 +taskgraph/actions/add_new_jobs.py,sha256=HAfuRDzFti_YmeudxqVl6hgrEbm-ki5-jSCDMC0HBDE,1836 +taskgraph/actions/cancel.py,sha256=UQSt_6y3S6PXNmUo_mNaUOuDvK2bixWjzdjTKXieEEg,1309 +taskgraph/actions/cancel_all.py,sha256=zrKgnW63gMGS5yldJieDt-GAR_XTiGRgybWAipIUCqQ,1941 +taskgraph/actions/rebuild_cached_tasks.py,sha256=UrVAvTmkkF4TAB5vNSpK1kJqMhMkKAMGmrifxH9kQJQ,1086 +taskgraph/actions/registry.py,sha256=xmhoEGMyYj6TTRFwMowZAUp0aqvtLvdVfmRWM7Yh7xo,13122 +taskgraph/actions/retrigger.py,sha256=wF08p_CgsfqraYelc3JLmPcqBFcO-Yt8gZZLlJZBixQ,9387 +taskgraph/actions/util.py,sha256=TxWxMWiKZeuKRwqiUawzjzpa5VF5AWgAKCLy7YaKG80,10661 +taskgraph/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +taskgraph/loader/default.py,sha256=ND_Sx7yx7io1B-6rWEGkg3UIy7iO3DvSLMXwcEqF1N8,1185 
+taskgraph/loader/transform.py,sha256=olUBPjxk3eEIg25sduxlcyqhjoig4ts5kPlT_zs6g9g,2147 +taskgraph/optimize/__init__.py,sha256=Oqpq1RW8QzOcu7zaMlNQ3BHT9ws9e_93FWfCqzNcQps,123 +taskgraph/optimize/base.py,sha256=WvoDNewyHG46IQbG3th-aau9OxSKegsYNfvdOEmunbA,18341 +taskgraph/optimize/strategies.py,sha256=IifMlxppVrIABsvn6UBwQYBFUdxkmyZz_FOtK6yNPps,2380 +taskgraph/optimize/__pycache__/__init__.cpython-38.pyc,sha256=CLBBOiPKI142wugZbBPN5MJ5XRyxhlcmfbjuJQBKI-Q,291 +taskgraph/optimize/__pycache__/base.cpython-38.pyc,sha256=aBAPa3KwWMtiAMhXN0LdHaspHwPWp3a9W7-Oci9x4_w,15334 +taskgraph/optimize/__pycache__/strategies.cpython-38.pyc,sha256=fjYsf_6YNa-kIA4vjDaJPgZIt2HaY1xskDJ90SyhQbA,1777 +taskgraph/run-task/fetch-content,sha256=G1aAvZlTg0yWHqxhSxi4RvfxW-KBJ5JwnGtWRqfH_bg,29990 +taskgraph/run-task/hgrc,sha256=BybWLDR89bWi3pE5T05UqmDHs02CbLypE-omLZWU6Uk,896 +taskgraph/run-task/robustcheckout.py,sha256=vPKvHb3fIIJli9ZVZG88XYoa8Sohy2JrpmH6pDgBDHI,30813 +taskgraph/run-task/run-task,sha256=KSIUkIfZUzjfJtiIPtwXBU4B0vA9hZnZslJ-heKRIuU,45128 +taskgraph/transforms/__init__.py,sha256=aw1dz2sRWZcbTILl6SVDuqIEw0mDdjSYu3LCVs-RLXE,110 +taskgraph/transforms/base.py,sha256=LFw2NwhrSriI3vbcCttArTFb7uHxckQpHeFZmatofvM,5146 +taskgraph/transforms/cached_tasks.py,sha256=Z10VD1kEBVXJvj8qSsNTq2mYpklh0V1EN8OT6QK3v_E,2607 +taskgraph/transforms/code_review.py,sha256=eE2xrDtdD_n3HT3caQ2HGAkPm6Uutdm4hDCpCoFjEps,707 +taskgraph/transforms/docker_image.py,sha256=AUuWMx43FcQfgbXy4_2Sjae0cWrh5XWMMcJ3ItcoKes,7606 +taskgraph/transforms/fetch.py,sha256=ORnxpVidOQtI1q1xeHl1c1jlShXD8R_jTGC2CX3lLM4,10479 +taskgraph/transforms/from_deps.py,sha256=aMqzvjC9ckK7T8-u4MoA0QyqSIceXfjJp4whExmUWHE,6647 +taskgraph/transforms/notify.py,sha256=0sga-Ls9dhWLAsL0FBjXmVbbduee8LAZp_1pHBQR0iI,6019 +taskgraph/transforms/release_notifications.py,sha256=jrb9CCT-z_etDf690T-AeCvdzIoVWBAeM_FGoW7FIzA,3305 +taskgraph/transforms/task.py,sha256=572A6ZIXDgm6SQ6vLqQbkhQzMIk5rNguC98xpezpoGQ,52086 
+taskgraph/transforms/__pycache__/__init__.cpython-38.pyc,sha256=XHsSgZEVDiQqINzElOjBvjhPjyfaNjAwTtV5Aj6ubDQ,232 +taskgraph/transforms/__pycache__/base.cpython-38.pyc,sha256=s5IC570o9P2J12benwBfeAN7RSRWKKxPErkEHNZbq_c,4510 +taskgraph/transforms/job/__init__.py,sha256=Bzjm-R8lnYdCBkSVlMmNyRW_QTWUYBOhYr1hSrVmpEI,17271 +taskgraph/transforms/job/common.py,sha256=ldlbRI8sdEd-eUcre4GtXMerUg0RQZ_XSe9GwAkfI3I,5897 +taskgraph/transforms/job/index_search.py,sha256=Ngh9FFu1bx2kHVTChW2vcrbnb3SzMneRHopXk18RfB4,1220 +taskgraph/transforms/job/run_task.py,sha256=5vmSwjWBNniSU2UcbnE_BQGct4bUTULIivYXlFSqB-4,9814 +taskgraph/transforms/job/toolchain.py,sha256=GOqIvp1MgtV-6whi2ofgSCFB7GolikZbfLXz0C1h0vc,6015 +taskgraph/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +taskgraph/util/archive.py,sha256=nzYn8cQ3NfLAeV-2SuTNoeQ6hg8m40f6FQcSTyVIKwQ,2855 +taskgraph/util/attributes.py,sha256=pPOFmwkDQQ-IqfDpVghZ10YI_qXRY4Bi5JP3xr6XVvc,2964 +taskgraph/util/cached_tasks.py,sha256=o-yJ91wlWbzoDB2GvKPpGcDE27_IEMgczp_figEBjV8,3406 +taskgraph/util/decision.py,sha256=uTC143FpTKQkGff5jIz3voWRYXBCHgx-XAm7FMW53hE,2433 +taskgraph/util/dependencies.py,sha256=U9nncoFvE4aXWeOr_Q-igeKNkzqGvgSTveBZO3OMyI4,2592 +taskgraph/util/docker.py,sha256=vdTruZT2Z_GVcyAYilaHt8VaRj4b-dtBKVWlq_GwYvE,11699 +taskgraph/util/hash.py,sha256=31sQmDwQOavA5hWsmzWDNFoFTaTp5a7qLSQLNTEALD8,1661 +taskgraph/util/keyed_by.py,sha256=cgBH4tG8eH5UUrm5q4ODG7A4fzkGAOI7feVoZy3V8Ho,3419 +taskgraph/util/memoize.py,sha256=XDlwc-56gzoY8QTwOoiCOYL-igX7JoMcY-9Ih80Euc8,1331 +taskgraph/util/parameterization.py,sha256=dzxh8Bc8MBKoDMwj2V2AQab9UrC-JcM3tg0hDVTWpjc,3184 +taskgraph/util/path.py,sha256=e-JloOQV2-Oua_pe335bv4xWAB07vb82TKpu_zCOl0w,4466 +taskgraph/util/python_path.py,sha256=ed4F5z2mId56LauVczgxm_LGxgQi8XlxlYDgXOPZyII,1576 +taskgraph/util/readonlydict.py,sha256=XzTG-gqGqWVlSkDxSyOL6Ur7Z0ONhIJ9DVLWV3q4q1w,787 +taskgraph/util/schema.py,sha256=JGd0Imjfv6JKCY_tjJtOYwI6uwKUaNgzAcvcZj5WE6A,8323 
+taskgraph/util/shell.py,sha256=MB9zHVSvxgOuszgmKr2rWUDahANZkbHHNkjjagZG_3I,1317 +taskgraph/util/taskcluster.py,sha256=cGUGvkrefRHngjyZm_iQRYKRlGi4jMIr7ky0fi_YBrg,12445 +taskgraph/util/taskgraph.py,sha256=ecKEvTfmLVvEKLPO_0g34CqVvc0iCzuNMh3064BZNrE,1969 +taskgraph/util/templates.py,sha256=Dqxfl244u-PX7dnsk3_vYyzDwpDgJtANK6NmZwN3Qow,1417 +taskgraph/util/time.py,sha256=pNFcTH-iYRfm2-okm1lMATc4B5wO-_FXbOFXEtXD27g,3390 +taskgraph/util/treeherder.py,sha256=A3rpPUQB60Gn1Yx-OZgKuWWGJ8x0-6tcdeeslzco9ag,2687 +taskgraph/util/vcs.py,sha256=wyDcz1oIvxyS7HbLFUP-G8Y1io3mV5dgfYagnDMSJ90,18780 +taskgraph/util/verify.py,sha256=cSd7EeP9hUvp-5WOvKDHrvpFAGb_LuiNPxPp0-YmNEA,8947 +taskgraph/util/workertypes.py,sha256=1wgM6vLrlgtyv8854anVIs0Bx11kV8JJJaKcOHJc2j0,2498 +taskgraph/util/yaml.py,sha256=hfKI_D8Q7dimq4_VvO3WEh8CJsTrsIMwN6set7HIQbY,990 +taskgraph/util/__pycache__/__init__.cpython-38.pyc,sha256=9brC-EPautANDgBHBurwO0lnHqM94S9LElkIu9Ib2_0,152 +taskgraph/util/__pycache__/attributes.cpython-38.pyc,sha256=T34puu_G9uGNF6cNJjrwjmRGSys5Or_ZVl9EKNwiL5U,2435 +taskgraph/util/__pycache__/keyed_by.cpython-38.pyc,sha256=Nqt3oZxiaNm80hi0dxYeW9Js0x6Hx6lO52fysVA1n4M,2546 +taskgraph/util/__pycache__/memoize.cpython-38.pyc,sha256=wYq34EKYhM6uVSz0VtHha6ABqJtrOtQPowfZN2Lc4VI,1364 +taskgraph/util/__pycache__/path.cpython-38.pyc,sha256=XZMKBZIK37z9RpfZL-T-c8jON-DS3jnK-rrMVmd8UUU,4918 +taskgraph/util/__pycache__/python_path.cpython-38.pyc,sha256=7CNSFhL1CJsGe0ivDIWcohyJJAr5BoAL_gesFZ0XfbE,1527 +taskgraph/util/__pycache__/schema.cpython-38.pyc,sha256=0Hew9ATBeA1amGZ_EjL7yULT2zJ4HQA6k0DDdfBmDxE,7183 +taskgraph/util/__pycache__/verify.cpython-38.pyc,sha256=8U5zC7jsLhwX6741yjcuH5quED3PxKItqDuACPc6yW0,7721 +taskgraph/util/__pycache__/workertypes.cpython-38.pyc,sha256=hD8JOa_1TnhyQSyNcAZRpltqzgazoa2ukQB5gDGTNB4,2014 +taskgraph/util/__pycache__/yaml.cpython-38.pyc,sha256=qOzXDWZxoUTcfeHjrKZKUKoI1y4vFKrMl93s7tqAAF4,1271 
+taskcluster_taskgraph-5.6.1.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725 +taskcluster_taskgraph-5.6.1.dist-info/METADATA,sha256=hCWRIzSli8YQZo8lumhEYgg_oIhC2DjFjwRC4xqHZOI,1087 +taskcluster_taskgraph-5.6.1.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +taskcluster_taskgraph-5.6.1.dist-info/entry_points.txt,sha256=2hxDzE3qq_sHh-J3ROqwpxgQgxO-196phWAQREl2-XA,50 +taskcluster_taskgraph-5.6.1.dist-info/top_level.txt,sha256=3JNeYn_hNiNXC7DrdH_vcv-WYSE7QdgGjdvUYvSjVp0,10 +taskcluster_taskgraph-5.6.1.dist-info/RECORD,, diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/WHEEL b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/WHEEL new file mode 100644 index 0000000000000..becc9a66ea739 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/entry_points.txt b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/entry_points.txt similarity index 98% rename from third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/entry_points.txt rename to third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/entry_points.txt index 086555b5ccf5c..dec40df69f935 100644 --- a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/entry_points.txt +++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/entry_points.txt @@ -1,3 +1,2 @@ [console_scripts] taskgraph = taskgraph.main:main - diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/top_level.txt b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/top_level.txt similarity 
index 100% rename from third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/top_level.txt rename to third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-5.6.1.dist-info/top_level.txt diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py index 9aef5a8b7e7b7..8605b1f240c6c 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py @@ -2,6 +2,7 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. +__version__ = "5.6.1" # Maximum number of dependencies a single task can have # https://docs.taskcluster.net/reference/platform/taskcluster-queue/references/api#createTask diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py index fc10668566407..c5e18215462be 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py @@ -59,6 +59,6 @@ def add_new_jobs_action(parameters, graph_config, input, task_group_id, task_id) label_to_taskid, parameters, decision_task_id, - i, + f"{i}", ) combine_task_graph_files(list(range(times))) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py index b2636f46a3973..d3e0440839e2f 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py @@ -3,9 +3,9 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-import concurrent.futures as futures import logging import os +from concurrent import futures import requests diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/rebuild_cached_tasks.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/rebuild_cached_tasks.py new file mode 100644 index 0000000000000..2b88e6a698b58 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/rebuild_cached_tasks.py @@ -0,0 +1,36 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from .registry import register_callback_action +from .util import create_tasks, fetch_graph_and_labels + + +@register_callback_action( + name="rebuild-cached-tasks", + title="Rebuild Cached Tasks", + symbol="rebuild-cached", + description="Rebuild cached tasks.", + order=1000, + context=[], +) +def rebuild_cached_tasks_action( + parameters, graph_config, input, task_group_id, task_id +): + decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels( + parameters, graph_config + ) + cached_tasks = [ + label + for label, task in full_task_graph.tasks.items() + if task.attributes.get("cached_task", False) + ] + if cached_tasks: + create_tasks( + graph_config, + cached_tasks, + full_task_graph, + label_to_taskid, + parameters, + decision_task_id, + ) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py index 4758beb625968..fd488b35fcd5a 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py @@ -176,7 +176,7 @@ def retrigger_action(parameters, graph_config, input, task_group_id, task_id): label_to_taskid, parameters, decision_task_id, - i, + f"{i}", ) logger.info(f"Scheduled 
{label}{with_downstream}(time {i + 1}/{times})") diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py index dd3248d209200..cf81029da287f 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py @@ -3,11 +3,11 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. -import concurrent.futures as futures import copy import logging import os import re +from concurrent import futures from functools import reduce from requests.exceptions import HTTPError @@ -143,7 +143,7 @@ def create_tasks( If you wish to create the tasks in a new group, leave out decision_task_id. Returns an updated label_to_taskid containing the new tasks""" - if suffix != "": + if suffix: suffix = f"-{suffix}" to_run = set(to_run) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/config.py b/third_party/python/taskcluster_taskgraph/taskgraph/config.py index 9517a4316c839..7ea7dc7b3370f 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/config.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/config.py @@ -6,8 +6,9 @@ import logging import os import sys +from dataclasses import dataclass +from typing import Dict -import attr from voluptuous import All, Any, Extra, Length, Optional, Required from .util import path @@ -34,6 +35,11 @@ "lowest", ), ), + Optional( + "task-deadline-after", + description="Default 'deadline' for tasks, in relative date format. " + "Eg: '1 week'", + ): optionally_keyed_by("project", str), Required("workers"): { Required("aliases"): { str: { @@ -55,6 +61,10 @@ description="The taskcluster index prefix to use for caching tasks. 
" "Defaults to `trust-domain`.", ): str, + Optional( + "index-path-regexes", + description="Regular expressions matching index paths to be summarized.", + ): [str], Required("repositories"): All( { str: { @@ -74,10 +84,10 @@ """Schema for GraphConfig""" -@attr.s(frozen=True, cmp=False) +@dataclass(frozen=True, eq=False) class GraphConfig: - _config = attr.ib() - root_dir = attr.ib() + _config: Dict + root_dir: str _PATH_MODIFIED = False @@ -133,4 +143,4 @@ def load_graph_config(root_dir): config = load_yaml(config_yml) validate_graph_config(config) - return GraphConfig(config=config, root_dir=root_dir) + return GraphConfig(config, root_dir=root_dir) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/create.py b/third_party/python/taskcluster_taskgraph/taskgraph/create.py index 3661ac82717ef..deb1ac5348a77 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/create.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/create.py @@ -3,10 +3,10 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. -import concurrent.futures as futures import json import logging import sys +from concurrent import futures from slugid import nice as slugid diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/decision.py b/third_party/python/taskcluster_taskgraph/taskgraph/decision.py index 6c5da8c65d3fb..ed412f4473e6a 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/decision.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/decision.py @@ -227,7 +227,9 @@ def get_decision_parameters(graph_config, options): # ..but can be overridden by the commit message: if it contains the special # string "DONTBUILD" and this is an on-push decision task, then use the # special 'nothing' target task method. 
- if "DONTBUILD" in commit_message and options["tasks_for"] == "hg-push": + if "DONTBUILD" in commit_message and ( + options["tasks_for"] in ("hg-push", "github-push") + ): parameters["target_tasks_method"] = "nothing" if options.get("optimize_target_tasks") is not None: diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/docker.py b/third_party/python/taskcluster_taskgraph/taskgraph/docker.py index c142f36391cdf..68b377d5b3064 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/docker.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/docker.py @@ -151,7 +151,6 @@ def download_and_modify_image(): req.raise_for_status() with zstd.ZstdDecompressor().stream_reader(req.raw) as ifh: - tarin = tarfile.open( mode="r|", fileobj=ifh, diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/generator.py b/third_party/python/taskcluster_taskgraph/taskgraph/generator.py index e1b900cf658e6..a8ee788f4f81d 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/generator.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/generator.py @@ -5,9 +5,8 @@ import copy import logging import os -from typing import AnyStr - -import attr +from dataclasses import dataclass +from typing import Dict from . 
import filter_tasks from .config import GraphConfig, load_graph_config @@ -31,19 +30,18 @@ class KindNotFound(Exception): """ -@attr.s(frozen=True) +@dataclass(frozen=True) class Kind: - - name = attr.ib(type=AnyStr) - path = attr.ib(type=AnyStr) - config = attr.ib(type=dict) - graph_config = attr.ib(type=GraphConfig) + name: str + path: str + config: Dict + graph_config: GraphConfig def _get_loader(self): try: loader = self.config["loader"] except KeyError: - raise KeyError(f"{self.path!r} does not define `loader`") + loader = "taskgraph.loader.default:loader" return find_object(loader) def load_tasks(self, parameters, loaded_tasks, write_artifacts): @@ -59,6 +57,9 @@ def load_tasks(self, parameters, loaded_tasks, write_artifacts): transforms = TransformSequence() for xform_path in config["transforms"]: + if ":" not in xform_path: + xform_path = f"{xform_path}:transforms" + transform = find_object(xform_path) transforms.add(transform) @@ -326,6 +327,10 @@ def _run(self): edges = set() for t in full_task_set: for depname, dep in t.dependencies.items(): + if dep not in all_tasks.keys(): + raise Exception( + f"Task '{t.label}' lists a dependency that does not exist: '{dep}'" + ) edges.add((t.label, dep, depname)) full_task_graph = TaskGraph(all_tasks, Graph(full_task_set.graph.nodes, edges)) @@ -353,12 +358,6 @@ def _run(self): yield self.verify("target_task_set", target_task_set, graph_config, parameters) logger.info("Generating target task graph") - # include all docker-image build tasks here, in case they are needed for a graph morph - docker_image_tasks = { - t.label - for t in full_task_graph.tasks.values() - if t.attributes["kind"] == "docker-image" - } # include all tasks with `always_target` set if parameters["enable_always_target"]: always_target_tasks = { @@ -372,7 +371,7 @@ def _run(self): "Adding %d tasks with `always_target` attribute" % (len(always_target_tasks) - len(always_target_tasks & target_tasks)) ) - requested_tasks = target_tasks | 
docker_image_tasks | always_target_tasks + requested_tasks = target_tasks | always_target_tasks target_graph = full_task_graph.graph.transitive_closure(requested_tasks) target_task_graph = TaskGraph( {l: all_tasks[l] for l in target_graph.nodes}, target_graph diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/graph.py b/third_party/python/taskcluster_taskgraph/taskgraph/graph.py index cdd280e2b1633..36b7f149846c5 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/graph.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/graph.py @@ -4,11 +4,11 @@ import collections +from dataclasses import dataclass +from typing import FrozenSet -import attr - -@attr.s(frozen=True) +@dataclass(frozen=True) class Graph: """Generic representation of a directed acyclic graph with labeled edges connecting the nodes. Graph operations are implemented in a functional @@ -24,8 +24,8 @@ class Graph: node `left` to node `right`.. """ - nodes = attr.ib(converter=frozenset) - edges = attr.ib(converter=frozenset) + nodes: FrozenSet + edges: FrozenSet def transitive_closure(self, nodes, reverse=False): """Return the transitive closure of : the graph containing all diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/loader/default.py b/third_party/python/taskcluster_taskgraph/taskgraph/loader/default.py new file mode 100644 index 0000000000000..5b2c258917b1e --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/loader/default.py @@ -0,0 +1,33 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ + +import logging + +from .transform import loader as transform_loader + +logger = logging.getLogger(__name__) + + +DEFAULT_TRANSFORMS = [ + "taskgraph.transforms.job:transforms", + "taskgraph.transforms.task:transforms", +] + + +def loader(kind, path, config, params, loaded_tasks): + """ + This default loader builds on the `transform` loader by providing sensible + default transforms that the majority of simple tasks will need. + Specifically, `job` and `task` transforms will be appended to the end of the + list of transforms in the kind being loaded. + """ + transform_refs = config.setdefault("transforms", []) + for t in DEFAULT_TRANSFORMS: + if t in config.get("transforms", ()): + raise KeyError( + f"Transform {t} is already present in the loader's default transforms; it must not be defined in the kind" + ) + transform_refs.extend(DEFAULT_TRANSFORMS) + return transform_loader(kind, path, config, params, loaded_tasks) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/main.py b/third_party/python/taskcluster_taskgraph/taskgraph/main.py index 88f2f6d37db2f..5f96965acb12e 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/main.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/main.py @@ -189,7 +189,7 @@ def logfile(spec): spec = parameters[0] out = format_taskgraph(options, spec, logfile(spec)) dump_output(out, options["output_file"]) - return + return 0 futures = {} with ProcessPoolExecutor() as executor: @@ -197,11 +197,13 @@ def logfile(spec): f = executor.submit(format_taskgraph, options, spec, logfile(spec)) futures[f] = spec + returncode = 0 for future in as_completed(futures): output_file = options["output_file"] spec = futures[future] e = future.exception() if e: + returncode = 1 out = "".join(traceback.format_exception(type(e), e, e.__traceback__)) if options["diff"]: # Dump to console so we don't accidentally diff the tracebacks. 
@@ -215,6 +217,8 @@ def logfile(spec): params_spec=spec if len(parameters) > 1 else None, ) + return returncode + @command( "tasks", @@ -364,13 +368,14 @@ def show_taskgraph(options): # branch or bookmark (which are both available on the VCS object) # as `branch` is preferable to a specific revision. cur_rev = repo.branch or repo.head_rev[:12] + cur_rev_file = cur_rev.replace("/", "_") diffdir = tempfile.mkdtemp() atexit.register( shutil.rmtree, diffdir ) # make sure the directory gets cleaned up options["output_file"] = os.path.join( - diffdir, f"{options['graph_attr']}_{cur_rev}" + diffdir, f"{options['graph_attr']}_{cur_rev_file}" ) print(f"Generating {options['graph_attr']} @ {cur_rev}", file=sys.stderr) @@ -408,7 +413,7 @@ def show_taskgraph(options): # to setup its `mach` based logging. setup_logging() - generate_taskgraph(options, parameters, logdir) + ret = generate_taskgraph(options, parameters, logdir) if options["diff"]: assert diffdir is not None @@ -423,15 +428,16 @@ def show_taskgraph(options): base_rev = repo.base_rev else: base_rev = options["diff"] + base_rev_file = base_rev.replace("/", "_") try: repo.update(base_rev) base_rev = repo.head_rev[:12] options["output_file"] = os.path.join( - diffdir, f"{options['graph_attr']}_{base_rev}" + diffdir, f"{options['graph_attr']}_{base_rev_file}" ) print(f"Generating {options['graph_attr']} @ {base_rev}", file=sys.stderr) - generate_taskgraph(options, parameters, logdir) + ret |= generate_taskgraph(options, parameters, logdir) finally: repo.update(cur_rev) @@ -445,8 +451,10 @@ def show_taskgraph(options): ] for spec in parameters: - base_path = os.path.join(diffdir, f"{options['graph_attr']}_{base_rev}") - cur_path = os.path.join(diffdir, f"{options['graph_attr']}_{cur_rev}") + base_path = os.path.join( + diffdir, f"{options['graph_attr']}_{base_rev_file}" + ) + cur_path = os.path.join(diffdir, f"{options['graph_attr']}_{cur_rev_file}") params_name = None if len(parameters) > 1: @@ -457,9 +465,8 @@ def 
show_taskgraph(options): try: proc = subprocess.run( diffcmd + [base_path, cur_path], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, + capture_output=True, + text=True, check=True, ) diff_output = proc.stdout @@ -490,6 +497,8 @@ def show_taskgraph(options): if len(parameters) > 1: print(f"See '{logdir}' for logs", file=sys.stderr) + return ret + @command("build-image", help="Build a Docker image") @argument("image_name", help="Name of the image to build") @@ -505,6 +514,7 @@ def show_taskgraph(options): def build_image(args): from taskgraph.docker import build_context, build_image + validate_docker() if args["context_only"] is None: build_image(args["image_name"], args["tag"], os.environ) else: @@ -541,6 +551,7 @@ def load_image(args): if not args.get("image_name") and not args.get("task_id"): print("Specify either IMAGE-NAME or TASK-ID") sys.exit(1) + validate_docker() try: if args["task_id"]: ok = load_image_by_task_id(args["task_id"], args.get("tag")) @@ -553,6 +564,13 @@ def load_image(args): sys.exit(1) +def validate_docker(): + p = subprocess.run(["docker", "ps"], capture_output=True) + if p.returncode != 0: + print("Error connecting to Docker:", p.stderr) + sys.exit(1) + + @command("image-digest", help="Print the digest of a docker image.") @argument( "image_name", @@ -728,6 +746,82 @@ def load_data(filename): sys.exit(1) +@command( + "init", description="Initialize a new Taskgraph setup in a new or existing project." 
+) +@argument( + "-f", + "--force", + action="store_true", + default=False, + help="Bypass safety checks.", +) +@argument( + "--prompt", + dest="no_input", + action="store_false", + default=True, + help="Prompt for input rather than using default values (advanced).", +) +@argument( + "--template", + default="gh:taskcluster/taskgraph", + help=argparse.SUPPRESS, # used for testing +) +def init_taskgraph(options): + from cookiecutter.main import cookiecutter + + import taskgraph + from taskgraph.util.vcs import get_repository + + repo = get_repository(os.getcwd()) + root = Path(repo.path) + + # Clean up existing installations if necessary. + tc_yml = root.joinpath(".taskcluster.yml") + if tc_yml.is_file(): + if not options["force"]: + proceed = input( + "A Taskcluster setup already exists in this repository, " + "would you like to overwrite it? [y/N]: " + ).lower() + while proceed not in ("y", "yes", "n", "no"): + proceed = input(f"Invalid option '{proceed}'! Try again: ") + + if proceed[0] == "n": + sys.exit(1) + + tc_yml.unlink() + tg_dir = root.joinpath("taskcluster") + if tg_dir.is_dir(): + shutil.rmtree(tg_dir) + + # Populate some defaults from the current repository. + context = {"project_name": root.name} + + repo_url = repo.get_url() + if repo.tool == "git" and "github.com" in repo_url: + context["repo_host"] = "github" + elif repo.tool == "hg" and "hg.mozilla.org" in repo_url: + context["repo_host"] = "hgmo" + else: + raise RuntimeError( + "Repository not supported! Taskgraph currently only " + "supports repositories hosted on Github or hg.mozilla.org." + ) + + # Generate the project. 
+ cookiecutter( + options["template"], + checkout=taskgraph.__version__, + directory="template", + extra_context=context, + no_input=options["no_input"], + output_dir=root.parent, + overwrite_if_exists=True, + ) + + def create_parser(): parser = argparse.ArgumentParser(description="Interact with taskgraph") subparsers = parser.add_subparsers() @@ -750,7 +844,7 @@ def main(args=sys.argv[1:]): parser = create_parser() args = parser.parse_args(args) try: - args.command(vars(args)) + return args.command(vars(args)) except Exception: traceback.print_exc() sys.exit(1) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/morph.py b/third_party/python/taskcluster_taskgraph/taskgraph/morph.py index c488317782411..bfa1560270557 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/morph.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/morph.py @@ -110,18 +110,6 @@ def derive_index_task(task, taskgraph, label_to_taskid, parameters, graph_config return task, taskgraph, label_to_taskid -# these regular expressions capture route prefixes for which we have a star -# scope, allowing them to be summarized. Each should correspond to a star scope -# in each Gecko `assume:repo:hg.mozilla.org/...` role. -_SCOPE_SUMMARY_REGEXPS = [ - # TODO Bug 1631839 - Remove these scopes once the migration is done - re.compile(r"(index:insert-task:project\.mobile\.fenix\.v2\.[^.]*\.).*"), - re.compile( - r"(index:insert-task:project\.mobile\.reference-browser\.v3\.[^.]*\.).*" - ), -] - - def make_index_task(parent_task, taskgraph, label_to_taskid, parameters, graph_config): index_paths = [ r.split(".", 1)[1] for r in parent_task.task["routes"] if r.startswith("index.") @@ -138,19 +126,21 @@ def make_index_task(parent_task, taskgraph, label_to_taskid, parameters, graph_c # namespace-heavy index task might have more scopes than can fit in a # temporary credential. 
scopes = set() - domain_scope_regex = re.compile( - r"(index:insert-task:{trust_domain}\.v2\.[^.]*\.).*".format( + domain_index_regex = re.compile( + r"({trust_domain}\.v2\.[^.]*\.).*".format( trust_domain=re.escape(graph_config["trust-domain"]) ) ) - all_scopes_summary_regexps = _SCOPE_SUMMARY_REGEXPS + [domain_scope_regex] + index_path_res = [domain_index_regex] + for path in graph_config["taskgraph"].get("index-path-regexes", ()): + index_path_res.append(re.compile(path)) for path in index_paths: - scope = f"index:insert-task:{path}" - for summ_re in all_scopes_summary_regexps: - match = summ_re.match(scope) + for index_path_re in index_path_res: + match = index_path_re.match(path) if match: - scope = match.group(1) + "*" + path = match.group(1) + "*" break + scope = f"index:insert-task:{path}" scopes.add(scope) task.task["scopes"] = sorted(scopes) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/optimize.py b/third_party/python/taskcluster_taskgraph/taskgraph/optimize.py new file mode 100644 index 0000000000000..c146d9a045d39 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/optimize.py @@ -0,0 +1,471 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +""" +The objective of optimization is to remove as many tasks from the graph as +possible, as efficiently as possible, thereby delivering useful results as +quickly as possible. For example, ideally if only a test script is modified in +a push, then the resulting graph contains only the corresponding test suite +task. + +See ``taskcluster/docs/optimization.rst`` for more information. +""" + + +import logging +import os +from collections import defaultdict + +from slugid import nice as slugid + +from . 
import files_changed +from .graph import Graph +from .taskgraph import TaskGraph +from .util.parameterization import resolve_task_references +from .util.taskcluster import find_task_id + +logger = logging.getLogger(__name__) + +TOPSRCDIR = os.path.abspath(os.path.join(__file__, "../../../")) + + +def optimize_task_graph( + target_task_graph, + params, + do_not_optimize, + decision_task_id, + existing_tasks=None, + strategies=None, +): + """ + Perform task optimization, returning a taskgraph and a map from label to + assigned taskId, including replacement tasks. + """ + label_to_taskid = {} + if not existing_tasks: + existing_tasks = {} + + # instantiate the strategies for this optimization process + if not strategies: + strategies = _make_default_strategies() + + optimizations = _get_optimizations(target_task_graph, strategies) + + removed_tasks = remove_tasks( + target_task_graph=target_task_graph, + optimizations=optimizations, + params=params, + do_not_optimize=do_not_optimize, + ) + + replaced_tasks = replace_tasks( + target_task_graph=target_task_graph, + optimizations=optimizations, + params=params, + do_not_optimize=do_not_optimize, + label_to_taskid=label_to_taskid, + existing_tasks=existing_tasks, + removed_tasks=removed_tasks, + ) + + return ( + get_subgraph( + target_task_graph, + removed_tasks, + replaced_tasks, + label_to_taskid, + decision_task_id, + ), + label_to_taskid, + ) + + +def _make_default_strategies(): + return { + "never": OptimizationStrategy(), # "never" is the default behavior + "index-search": IndexSearch(), + "skip-unless-changed": SkipUnlessChanged(), + } + + +def _get_optimizations(target_task_graph, strategies): + def optimizations(label): + task = target_task_graph.tasks[label] + if task.optimization: + opt_by, arg = list(task.optimization.items())[0] + return (opt_by, strategies[opt_by], arg) + else: + return ("never", strategies["never"], None) + + return optimizations + + +def _log_optimization(verb, opt_counts, 
opt_reasons=None): + if opt_reasons: + message = "optimize: {label} {action} because of {reason}" + for label, (action, reason) in opt_reasons.items(): + logger.debug(message.format(label=label, action=action, reason=reason)) + + if opt_counts: + logger.info( + f"{verb.title()} " + + ", ".join(f"{c} tasks by {b}" for b, c in sorted(opt_counts.items())) + + " during optimization." + ) + else: + logger.info(f"No tasks {verb} during optimization") + + +def remove_tasks(target_task_graph, params, optimizations, do_not_optimize): + """ + Implement the "Removing Tasks" phase, returning a set of task labels of all removed tasks. + """ + opt_counts = defaultdict(int) + opt_reasons = {} + removed = set() + dependents_of = target_task_graph.graph.reverse_links_dict() + tasks = target_task_graph.tasks + prune_candidates = set() + + # Traverse graph so dependents (child nodes) are guaranteed to be processed + # first. + for label in target_task_graph.graph.visit_preorder(): + # Dependents that can be pruned away (shouldn't cause this task to run). + # Only dependents that either: + # A) Explicitly reference this task in their 'if_dependencies' list, or + # B) Don't have an 'if_dependencies' attribute (i.e are in 'prune_candidates' + # because they should be removed but have prune_deps themselves) + # should be considered. + prune_deps = { + l + for l in dependents_of[label] + if l in prune_candidates + if not tasks[l].if_dependencies or label in tasks[l].if_dependencies + } + + def _keep(reason): + """Mark a task as being kept in the graph. Also recursively removes + any dependents from `prune_candidates`, assuming they should be + kept because of this task. + """ + opt_reasons[label] = ("kept", reason) + + # Removes dependents that were in 'prune_candidates' from a task + # that ended up being kept (and therefore the dependents should + # also be kept). 
+ queue = list(prune_deps) + while queue: + l = queue.pop() + + # If l is a prune_dep of multiple tasks it could be queued up + # multiple times. Guard against it being already removed. + if l not in prune_candidates: + continue + + # If a task doesn't set 'if_dependencies' itself (rather it was + # added to 'prune_candidates' due to one of its depenendents), + # then we shouldn't remove it. + if not tasks[l].if_dependencies: + continue + + prune_candidates.remove(l) + queue.extend([r for r in dependents_of[l] if r in prune_candidates]) + + def _remove(reason): + """Potentially mark a task as being removed from the graph. If the + task has dependents that can be pruned, add this task to + `prune_candidates` rather than removing it. + """ + if prune_deps: + # If there are prune_deps, unsure if we can remove this task yet. + prune_candidates.add(label) + else: + opt_reasons[label] = ("removed", reason) + opt_counts[reason] += 1 + removed.add(label) + + # if we're not allowed to optimize, that's easy.. + if label in do_not_optimize: + _keep("do not optimize") + continue + + # If there are remaining tasks depending on this one, do not remove. + if any( + l for l in dependents_of[label] if l not in removed and l not in prune_deps + ): + _keep("dependent tasks") + continue + + # Call the optimization strategy. + task = tasks[label] + opt_by, opt, arg = optimizations(label) + if opt.should_remove_task(task, params, arg): + _remove(opt_by) + continue + + # Some tasks should only run if their dependency was also run. Since we + # haven't processed dependencies yet, we add them to a list of + # candidate tasks for pruning. + if task.if_dependencies: + opt_reasons[label] = ("kept", opt_by) + prune_candidates.add(label) + else: + _keep(opt_by) + + if prune_candidates: + reason = "if-dependencies pruning" + for label in prune_candidates: + # There's an edge case where a triangle graph can cause a + # dependency to stay in 'prune_candidates' when the dependent + # remains. 
Do a final check to ensure we don't create any bad + # edges. + dependents = any( + d + for d in dependents_of[label] + if d not in prune_candidates + if d not in removed + ) + if dependents: + opt_reasons[label] = ("kept", "dependent tasks") + continue + removed.add(label) + opt_counts[reason] += 1 + opt_reasons[label] = ("removed", reason) + + _log_optimization("removed", opt_counts, opt_reasons) + return removed + + +def replace_tasks( + target_task_graph, + params, + optimizations, + do_not_optimize, + label_to_taskid, + removed_tasks, + existing_tasks, +): + """ + Implement the "Replacing Tasks" phase, returning a set of task labels of + all replaced tasks. The replacement taskIds are added to label_to_taskid as + a side-effect. + """ + opt_counts = defaultdict(int) + replaced = set() + links_dict = target_task_graph.graph.links_dict() + + for label in target_task_graph.graph.visit_postorder(): + # if we're not allowed to optimize, that's easy.. + if label in do_not_optimize: + continue + + # if this task depends on un-replaced, un-removed tasks, do not replace + if any(l not in replaced and l not in removed_tasks for l in links_dict[label]): + continue + + # if the task already exists, that's an easy replacement + repl = existing_tasks.get(label) + if repl: + label_to_taskid[label] = repl + replaced.add(label) + opt_counts["existing_tasks"] += 1 + continue + + # call the optimization strategy + task = target_task_graph.tasks[label] + opt_by, opt, arg = optimizations(label) + repl = opt.should_replace_task(task, params, arg) + if repl: + if repl is True: + # True means remove this task; get_subgraph will catch any + # problems with removed tasks being depended on + removed_tasks.add(label) + else: + label_to_taskid[label] = repl + replaced.add(label) + opt_counts[opt_by] += 1 + continue + + _log_optimization("replaced", opt_counts) + return replaced + + +def get_subgraph( + target_task_graph, + removed_tasks, + replaced_tasks, + label_to_taskid, + 
decision_task_id, +): + """ + Return the subgraph of target_task_graph consisting only of + non-optimized tasks and edges between them. + + To avoid losing track of taskIds for tasks optimized away, this method + simultaneously substitutes real taskIds for task labels in the graph, and + populates each task definition's `dependencies` key with the appropriate + taskIds. Task references are resolved in the process. + """ + + # check for any dependency edges from included to removed tasks + bad_edges = [ + (l, r, n) + for l, r, n in target_task_graph.graph.edges + if l not in removed_tasks and r in removed_tasks + ] + if bad_edges: + probs = ", ".join( + f"{l} depends on {r} as {n} but it has been removed" + for l, r, n in bad_edges + ) + raise Exception("Optimization error: " + probs) + + # fill in label_to_taskid for anything not removed or replaced + assert replaced_tasks <= set(label_to_taskid) + for label in sorted( + target_task_graph.graph.nodes - removed_tasks - set(label_to_taskid) + ): + label_to_taskid[label] = slugid() + + # resolve labels to taskIds and populate task['dependencies'] + tasks_by_taskid = {} + named_links_dict = target_task_graph.graph.named_links_dict() + omit = removed_tasks | replaced_tasks + for label, task in target_task_graph.tasks.items(): + if label in omit: + continue + task.task_id = label_to_taskid[label] + named_task_dependencies = { + name: label_to_taskid[label] + for name, label in named_links_dict.get(label, {}).items() + } + + # Add remaining soft dependencies + if task.soft_dependencies: + named_task_dependencies.update( + { + label: label_to_taskid[label] + for label in task.soft_dependencies + if label in label_to_taskid and label not in omit + } + ) + + task.task = resolve_task_references( + task.label, + task.task, + task_id=task.task_id, + decision_task_id=decision_task_id, + dependencies=named_task_dependencies, + ) + deps = task.task.setdefault("dependencies", []) + 
deps.extend(sorted(named_task_dependencies.values())) + tasks_by_taskid[task.task_id] = task + + # resolve edges to taskIds + edges_by_taskid = ( + (label_to_taskid.get(left), label_to_taskid.get(right), name) + for (left, right, name) in target_task_graph.graph.edges + ) + # ..and drop edges that are no longer entirely in the task graph + # (note that this omits edges to replaced tasks, but they are still in task.dependnecies) + edges_by_taskid = { + (left, right, name) + for (left, right, name) in edges_by_taskid + if left in tasks_by_taskid and right in tasks_by_taskid + } + + return TaskGraph(tasks_by_taskid, Graph(set(tasks_by_taskid), edges_by_taskid)) + + +class OptimizationStrategy: + def should_remove_task(self, task, params, arg): + """Determine whether to optimize this task by removing it. Returns + True to remove.""" + return False + + def should_replace_task(self, task, params, arg): + """Determine whether to optimize this task by replacing it. Returns a + taskId to replace this task, True to replace with nothing, or False to + keep the task.""" + return False + + +class Either(OptimizationStrategy): + """Given one or more optimization strategies, remove a task if any of them + says to, and replace with a task if any finds a replacement (preferring the + earliest). 
By default, each substrategy gets the same arg, but split_args + can return a list of args for each strategy, if desired.""" + + def __init__(self, *substrategies, **kwargs): + self.substrategies = substrategies + self.split_args = kwargs.pop("split_args", None) + if not self.split_args: + self.split_args = lambda arg: [arg] * len(substrategies) + if kwargs: + raise TypeError("unexpected keyword args") + + def _for_substrategies(self, arg, fn): + for sub, arg in zip(self.substrategies, self.split_args(arg)): + rv = fn(sub, arg) + if rv: + return rv + return False + + def should_remove_task(self, task, params, arg): + return self._for_substrategies( + arg, lambda sub, arg: sub.should_remove_task(task, params, arg) + ) + + def should_replace_task(self, task, params, arg): + return self._for_substrategies( + arg, lambda sub, arg: sub.should_replace_task(task, params, arg) + ) + + +class IndexSearch(OptimizationStrategy): + + # A task with no dependencies remaining after optimization will be replaced + # if artifacts exist for the corresponding index_paths. + # Otherwise, we're in one of the following cases: + # - the task has un-optimized dependencies + # - the artifacts have expired + # - some changes altered the index_paths and new artifacts need to be + # created. + # In every of those cases, we need to run the task to create or refresh + # artifacts. 
+ + def should_replace_task(self, task, params, index_paths): + "Look for a task with one of the given index paths" + for index_path in index_paths: + try: + task_id = find_task_id( + index_path, use_proxy=bool(os.environ.get("TASK_ID")) + ) + return task_id + except KeyError: + # 404 will end up here and go on to the next index path + pass + + return False + + +class SkipUnlessChanged(OptimizationStrategy): + def should_remove_task(self, task, params, file_patterns): + if params.get("repository_type") != "hg": + raise RuntimeError( + "SkipUnlessChanged optimization only works with mercurial repositories" + ) + + # pushlog_id == -1 - this is the case when run from a cron.yml job + if params.get("pushlog_id") == -1: + return False + + changed = files_changed.check(params, file_patterns) + if not changed: + logger.debug( + 'no files found matching a pattern in `skip-unless-changed` for "{}"'.format( + task.label + ) + ) + return True + return False diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py index c6846e60c5569..973b55063266c 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py @@ -10,7 +10,6 @@ @register_strategy("index-search") class IndexSearch(OptimizationStrategy): - # A task with no dependencies remaining after optimization will be replaced # if artifacts exist for the corresponding index_paths. 
# Otherwise, we're in one of the following cases: diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/fetch-content b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/fetch-content index 42dc5e2b284f6..0af923d01d047 100755 --- a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/fetch-content +++ b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/fetch-content @@ -190,8 +190,8 @@ def stream_download(url, sha256=None, size=None, headers=None): req = urllib.request.Request(url, None, req_headers) with urllib.request.urlopen( - req, cafile=certifi.where() - ) if certifi else urllib.request.urlopen(req) as fh: + req, timeout=60, cafile=certifi.where() + ) if certifi else urllib.request.urlopen(req, timeout=60) as fh: if not url.endswith(".gz") and fh.info().get("Content-Encoding") == "gzip": fh = gzip.GzipFile(fileobj=fh) @@ -321,7 +321,7 @@ def open_tar_stream(path: pathlib.Path): """""" if path.suffix == ".bz2": return bz2.open(str(path), "rb") - elif path.suffix == ".gz": + elif path.suffix in (".gz", ".tgz") : return gzip.open(str(path), "rb") elif path.suffix == ".xz": return lzma.open(str(path), "rb") @@ -336,7 +336,7 @@ def open_tar_stream(path: pathlib.Path): def archive_type(path: pathlib.Path): """Attempt to identify a path as an extractable archive.""" - if path.suffixes[-2:-1] == [".tar"]: + if path.suffixes[-2:-1] == [".tar"] or path.suffixes[-1:] == [".tgz"]: return "tar" elif path.suffix == ".zip": return "zip" @@ -374,7 +374,7 @@ def extract_archive(path, dest_dir, typ): elif typ == "zip": # unzip from stdin has wonky behavior. We don't use a pipe for it. 
ifh = open(os.devnull, "rb") - args = ["unzip", "-o", str(path)] + args = ["unzip", "-q", "-o", str(path)] pipe_stdin = False else: raise ValueError("unknown archive format: %s" % path) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/robustcheckout.py b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/robustcheckout.py index 7e12d07d5030e..b5d2230211e87 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/robustcheckout.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/robustcheckout.py @@ -41,7 +41,9 @@ # Causes worker to purge caches on process exit and for task to retry. EXIT_PURGE_CACHE = 72 -testedwith = b"4.5 4.6 4.7 4.8 4.9 5.0 5.1 5.2 5.3 5.4 5.5 5.6 5.7 5.8 5.9" +testedwith = ( + b"4.5 4.6 4.7 4.8 4.9 5.0 5.1 5.2 5.3 5.4 5.5 5.6 5.7 5.8 5.9 6.0 6.1 6.2 6.3 6.4" +) minimumhgversion = b"4.5" cmdtable = {} @@ -177,6 +179,8 @@ def robustcheckout( # worker.backgroundclose only makes things faster if running anti-virus, # which our automation doesn't. Disable it. ui.setconfig(b"worker", b"backgroundclose", False) + # Don't wait forever if the connection hangs + ui.setconfig(b"http", b"timeout", 600) # By default the progress bar starts after 3s and updates every 0.1s. We # change this so it shows and updates every 1.0s. 
@@ -494,6 +498,10 @@ def handlepullerror(e): ui.warn(b"ssl error: %s\n" % pycompat.bytestr(str(e))) handlenetworkfailure() return True + elif isinstance(e, urllibcompat.urlerr.httperror) and e.code >= 500: + ui.warn(b"http error: %s\n" % pycompat.bytestr(str(e.reason))) + handlenetworkfailure() + return True elif isinstance(e, urllibcompat.urlerr.urlerror): if isinstance(e.reason, socket.error): ui.warn(b"socket error: %s\n" % pycompat.bytestr(str(e.reason))) @@ -507,6 +515,10 @@ def handlepullerror(e): pycompat.bytestr(str(e.reason)), ) ) + elif isinstance(e, socket.timeout): + ui.warn(b"socket timeout\n") + handlenetworkfailure() + return True else: ui.warn( b"unhandled exception during network operation; type: %s; " @@ -527,7 +539,12 @@ def handlepullerror(e): rootnode = peerlookup(clonepeer, b"0") except error.RepoLookupError: raise error.Abort(b"unable to resolve root revision from clone " b"source") - except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e: + except ( + error.Abort, + ssl.SSLError, + urllibcompat.urlerr.urlerror, + socket.timeout, + ) as e: if handlepullerror(e): return callself() raise @@ -620,7 +637,12 @@ def handlepullerror(e): shareopts=shareopts, stream=True, ) - except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e: + except ( + error.Abort, + ssl.SSLError, + urllibcompat.urlerr.urlerror, + socket.timeout, + ) as e: if handlepullerror(e): return callself() raise @@ -688,7 +710,12 @@ def handlepullerror(e): pullop = exchange.pull(repo, remote, heads=pullrevs) if not pullop.rheads: raise error.Abort(b"unable to pull requested revision") - except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e: + except ( + error.Abort, + ssl.SSLError, + urllibcompat.urlerr.urlerror, + socket.timeout, + ) as e: if handlepullerror(e): return callself() raise @@ -785,7 +812,14 @@ def handlepullerror(e): # one to change the sparse profile and another to update to the new # revision. This is not desired. 
But there's not a good API in # Mercurial to do this as one operation. - with repo.wlock(), repo.dirstate.parentchange(), timeit( + # TRACKING hg64 - Mercurial 6.4 and later require call to + # dirstate.changing_parents(repo) + def parentchange(repo): + if util.safehasattr(repo.dirstate, "changing_parents"): + return repo.dirstate.changing_parents(repo) + return repo.dirstate.parentchange() + + with repo.wlock(), parentchange(repo), timeit( "sparse_update_config", "sparse-update-config" ): # pylint --py3k: W1636 diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task index f1e281f5cd3e9..aaf632b9a7b7f 100755 --- a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task +++ b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task @@ -14,20 +14,18 @@ current time to improve log usefulness. """ import sys -from typing import Optional if sys.version_info[0:2] < (3, 5): print("run-task requires Python 3.5+") sys.exit(1) - import argparse import datetime import errno import io import json import os -from pathlib import Path +import platform import re import shutil import signal @@ -35,21 +33,23 @@ import socket import stat import subprocess import time - import urllib.error import urllib.request - +from pathlib import Path from threading import Thread +from typing import Optional SECRET_BASEURL_TPL = "http://taskcluster/secrets/v1/secret/{}" GITHUB_SSH_FINGERPRINT = ( b"github.com ssh-rsa " - b"AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkcc" - b"Krpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFz" - b"LQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaS" - b"jB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3sku" - b"a2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==\n" + 
b"AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY" + b"4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDP" + b"gVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyR" + b"kQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWO" + b"WRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZ" + b"yaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+" + b"2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n" ) @@ -594,7 +594,12 @@ def git_checkout( ssh_key_file: Optional[Path], ssh_known_hosts_file: Optional[Path], ): - env = {"PYTHONUNBUFFERED": "1"} + env = { + # abort if transfer speed is lower than 1kB/s for 1 minute + "GIT_HTTP_LOW_SPEED_LIMIT": "1024", + "GIT_HTTP_LOW_SPEED_TIME": "60", + "PYTHONUNBUFFERED": "1", + } if ssh_key_file and ssh_known_hosts_file: if not ssh_key_file.exists(): @@ -641,12 +646,30 @@ def git_checkout( # between the previous state before the force-push and the current state. # # Unlike base_ref just above, there is no need to checkout the revision: - # it's immediately avaiable after the fetch. + # it's immediately available after the fetch. if base_rev and base_rev != NULL_REVISION: args = ["git", "fetch", "origin", base_rev] retry_required_command(b"vcs", args, cwd=destination_path, extra_env=env) + # If a ref was provided, it might be tag, so we need to make sure we fetch + # those. This is explicitly only done when base and head repo match, + # because it is the only scenario where tags could be present. (PRs, for + # example, always include an explicit rev.) Failure to do this could result + # in not having a tag, or worse: having an outdated version of one. + # `--force` is needed to be able to update an existing tag. 
+ if ref and base_repo == head_repo: + args = [ + "git", + "fetch", + "--tags", + "--force", + base_repo, + ref, + ] + + retry_required_command(b"vcs", args, cwd=destination_path, extra_env=env) + # If a ref isn't provided, we fetch all refs from head_repo, which may be slow args = [ "git", @@ -666,7 +689,9 @@ def git_checkout( if ref: args.extend(["-B", ref]) - args.append(commit if commit else ref) + + # `git fetch` set `FETCH_HEAD` reference to the last commit of the desired branch + args.append(commit if commit else "FETCH_HEAD") run_required_command(b"vcs", args, cwd=destination_path) @@ -683,6 +708,7 @@ def git_checkout( "git", "submodule", "update", + "--force", # Overrides any potential local changes ] run_required_command(b"vcs", args, cwd=destination_path) @@ -898,7 +924,6 @@ def collect_vcs_options(args, project, name): def vcs_checkout_from_args(options): - if not options["checkout"]: if options["ref"] and not options["revision"]: print("task should be defined in terms of non-symbolic revision") @@ -1045,12 +1070,23 @@ def maybe_run_resource_monitoring(): return process +def _display_python_version(): + print_line( + b"setup", b"Python version: %s\n" % platform.python_version().encode("utf-8") + ) + + def main(args): os.environ["TASK_WORKDIR"] = os.getcwd() print_line( b"setup", b"run-task started in %s\n" % os.environ["TASK_WORKDIR"].encode("utf-8"), ) + print_line( + b"setup", + b"Invoked by command: %s\n" % " ".join(args).encode("utf-8"), + ) + _display_python_version() running_as_root = IS_POSIX and os.getuid() == 0 # Arguments up to '--' are ours. 
After are for the main task @@ -1090,7 +1126,7 @@ def main(args): # Sort repositories so that parent checkout paths come before children repositories.sort(key=lambda repo: Path(repo["checkout"] or "/").parts) - uid = gid = gids = None + uid = gid = gids = user = group = None if IS_POSIX and running_as_root: user, group, gids = get_posix_user_group(args.user, args.group) uid = user.pw_uid diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/task.py b/third_party/python/taskcluster_taskgraph/taskgraph/task.py index a38a52b38e012..45427ac4f7663 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/task.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/task.py @@ -2,11 +2,11 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. +from dataclasses import dataclass, field +from typing import Any, Dict, List, Union -import attr - -@attr.s +@dataclass class Task: """ Representation of a task in a TaskGraph. Each Task has, at creation: @@ -31,18 +31,18 @@ class Task: display, comparison, serialization, etc. It has no functionality of its own. 
""" - kind = attr.ib() - label = attr.ib() - attributes = attr.ib() - task = attr.ib() - description = attr.ib(default="") - task_id = attr.ib(default=None, init=False) - optimization = attr.ib(default=None) - dependencies = attr.ib(factory=dict) - soft_dependencies = attr.ib(factory=list) - if_dependencies = attr.ib(factory=list) + kind: str + label: str + attributes: Dict + task: Dict + description: str = "" + task_id: Union[str, None] = field(default=None, init=False) + optimization: Union[Dict[str, Any], None] = field(default=None) + dependencies: Dict = field(default_factory=dict) + soft_dependencies: List = field(default_factory=list) + if_dependencies: List = field(default_factory=list) - def __attrs_post_init__(self): + def __post_init__(self): self.attributes["kind"] = self.kind def to_json(self): diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/taskgraph.py b/third_party/python/taskcluster_taskgraph/taskgraph/taskgraph.py index 158cfb861c7fa..e479a7cf15e18 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/taskgraph.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/taskgraph.py @@ -2,14 +2,14 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. - -import attr +from dataclasses import dataclass +from typing import List from .graph import Graph from .task import Task -@attr.s(frozen=True) +@dataclass(frozen=True) class TaskGraph: """ Representation of a task graph. @@ -21,10 +21,10 @@ class TaskGraph: tasks are "linked from" their dependents. 
""" - tasks = attr.ib() - graph = attr.ib() + tasks: List[Task] + graph: Graph - def __attrs_post_init__(self): + def __post_init__(self): assert set(self.tasks) == self.graph.nodes def for_each_task(self, f, *args, **kwargs): diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py index e69de29bb2d1d..4fa7b5fc0c1d0 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py @@ -0,0 +1,3 @@ +from taskgraph.transforms import ( # noqa: Added for backwards compat + notify as release_notifications, +) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py index 383e6a47981e7..e6fcd2400c477 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py @@ -4,9 +4,10 @@ import re -from typing import AnyStr +from dataclasses import dataclass, field +from typing import Dict, List, Union -import attr +from taskgraph.task import Task from ..config import GraphConfig from ..parameters import Parameters @@ -14,20 +15,20 @@ from ..util.schema import Schema, validate_schema -@attr.s(frozen=True) +@dataclass(frozen=True) class RepoConfig: - prefix = attr.ib(type=str) - name = attr.ib(type=str) - base_repository = attr.ib(type=str) - head_repository = attr.ib(type=str) - head_ref = attr.ib(type=str) - type = attr.ib(type=str) - path = attr.ib(type=str, default="") - head_rev = attr.ib(type=str, default=None) - ssh_secret_name = attr.ib(type=str, default=None) - - -@attr.s(frozen=True, cmp=False) + prefix: str + name: str + base_repository: str + head_repository: str + head_ref: str + type: str + path: str = "" + head_rev: Union[str, None] = None + ssh_secret_name: Union[str, 
None] = None + + +@dataclass(frozen=True, eq=False) class TransformConfig: """ A container for configuration affecting transforms. The `config` argument @@ -35,26 +36,26 @@ class TransformConfig: """ # the name of the current kind - kind = attr.ib() + kind: str # the path to the kind configuration directory - path = attr.ib(type=AnyStr) + path: str # the parsed contents of kind.yml - config = attr.ib(type=dict) + config: Dict # the parameters for this task-graph generation run - params = attr.ib(type=Parameters) + params: Parameters # a dict of all the tasks associated with the kind dependencies of the # current kind - kind_dependencies_tasks = attr.ib(type=dict) + kind_dependencies_tasks: Dict[str, Task] # Global configuration of the taskgraph - graph_config = attr.ib(type=GraphConfig) + graph_config: GraphConfig # whether to write out artifacts for the decision task - write_artifacts = attr.ib(type=bool) + write_artifacts: bool @property @memoize @@ -106,7 +107,7 @@ def repo_configs(self): return repo_configs -@attr.s() +@dataclass() class TransformSequence: """ Container for a sequence of transforms. Each transform is represented as a @@ -118,7 +119,7 @@ class TransformSequence: sequence. 
""" - _transforms = attr.ib(factory=list) + _transforms: List = field(default_factory=list) def __call__(self, config, items): for xform in self._transforms: @@ -135,9 +136,9 @@ def add_validate(self, schema): self.add(ValidateSchema(schema)) -@attr.s +@dataclass class ValidateSchema: - schema = attr.ib(type=Schema) + schema: Schema def __call__(self, config, tasks): for task in tasks: diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py index dd7c01e5a9992..d0c5b9c97b854 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py @@ -24,9 +24,9 @@ DIGEST_RE = re.compile("^[0-9a-f]{64}$") IMAGE_BUILDER_IMAGE = ( - "taskcluster/image_builder:4.0.0" + "mozillareleases/image_builder:5.0.0" "@sha256:" - "866c304445334703b68653e1390816012c9e6bdabfbd1906842b5b229e8ed044" + "e510a9a9b80385f71c112d61b2f2053da625aff2b6d430411ac42e424c58953f" ) transforms = TransformSequence() @@ -141,6 +141,7 @@ def fill_template(config, tasks): "image_name": image_name, "artifact_prefix": "public", }, + "always-target": True, "expires-after": "28 days" if config.params.is_try() else "1 year", "scopes": [], "run-on-projects": [], diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py index 65d4b62482346..bcb8ff38a691f 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py @@ -8,8 +8,9 @@ import os import re +from dataclasses import dataclass +from typing import Callable -import attr from voluptuous import Extra, Optional, Required import taskgraph @@ -56,10 +57,10 @@ fetch_builders = {} -@attr.s(frozen=True) +@dataclass(frozen=True) class FetchBuilder: - 
schema = attr.ib(type=Schema) - builder = attr.ib() + schema: Schema + builder: Callable def fetch_builder(name, schema): diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/from_deps.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/from_deps.py new file mode 100644 index 0000000000000..bf87cd3d19f21 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/from_deps.py @@ -0,0 +1,188 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +""" +Transforms used to create tasks based on the kind dependencies, filtering on +common attributes like the ``build-type``. + +These transforms are useful when follow-up tasks are needed for some +indeterminate subset of existing tasks. For example, running a signing task +after each build task, whatever builds may exist. +""" +from copy import deepcopy +from textwrap import dedent + +from voluptuous import Any, Extra, Optional, Required + +from taskgraph.transforms.base import TransformSequence +from taskgraph.util.attributes import attrmatch +from taskgraph.util.dependencies import GROUP_BY_MAP +from taskgraph.util.schema import Schema + +FROM_DEPS_SCHEMA = Schema( + { + Required("from-deps"): { + Optional( + "kinds", + description=dedent( + """ + Limit dependencies to specified kinds (defaults to all kinds in + `kind-dependencies`). + + The first kind in the list is the "primary" kind. The + dependency of this kind will be used to derive the label + and copy attributes (if `copy-attributes` is True). + """.lstrip() + ), + ): list, + Optional( + "with-attributes", + description=dedent( + """ + Limit dependencies to tasks whose attributes match + using :func:`~taskgraph.util.attributes.attrmatch`. 
+ """.lstrip() + ), + ): {str: Any(list, str)}, + Optional( + "group-by", + description=dedent( + """ + Group cross-kind dependencies using the given group-by + function. One task will be created for each group. If not + specified, the 'single' function will be used which creates + a new task for each individual dependency. + """.lstrip() + ), + ): Any( + None, + *GROUP_BY_MAP, + {Any(*GROUP_BY_MAP): object}, + ), + Optional( + "copy-attributes", + description=dedent( + """ + If True, copy attributes from the dependency matching the + first kind in the `kinds` list (whether specified explicitly + or taken from `kind-dependencies`). + """.lstrip() + ), + ): bool, + Optional( + "unique-kinds", + description=dedent( + """ + If true (the default), there must be only a single unique task + for each kind in a dependency group. Setting this to false + disables that requirement. + """.lstrip() + ), + ): bool, + }, + Extra: object, + }, +) +"""Schema for from_deps transforms.""" + +transforms = TransformSequence() +transforms.add_validate(FROM_DEPS_SCHEMA) + + +@transforms.add +def from_deps(config, tasks): + for task in tasks: + # Setup and error handling. + from_deps = task.pop("from-deps") + kind_deps = config.config.get("kind-dependencies", []) + kinds = from_deps.get("kinds", kind_deps) + + invalid = set(kinds) - set(kind_deps) + if invalid: + invalid = "\n".join(sorted(invalid)) + raise Exception( + dedent( + f""" + The `from-deps.kinds` key contains the following kinds + that are not defined in `kind-dependencies`: + {invalid} + """.lstrip() + ) + ) + + if not kinds: + raise Exception( + dedent( + """ + The `from_deps` transforms require at least one kind defined + in `kind-dependencies`! + """.lstrip() + ) + ) + + # Resolve desired dependencies. 
+ with_attributes = from_deps.get("with-attributes") + deps = [ + task + for task in config.kind_dependencies_tasks.values() + if task.kind in kinds + if not with_attributes or attrmatch(task.attributes, **with_attributes) + ] + + # Resolve groups. + group_by = from_deps.get("group-by", "single") + groups = set() + + if isinstance(group_by, dict): + assert len(group_by) == 1 + group_by, arg = group_by.popitem() + func = GROUP_BY_MAP[group_by] + if func.schema: + func.schema(arg) + groups = func(config, deps, arg) + else: + func = GROUP_BY_MAP[group_by] + groups = func(config, deps) + + # Split the task, one per group. + copy_attributes = from_deps.get("copy-attributes", False) + unique_kinds = from_deps.get("unique-kinds", True) + for group in groups: + # Verify there is only one task per kind in each group. + group_kinds = {t.kind for t in group} + if unique_kinds and len(group_kinds) < len(group): + raise Exception( + "The from_deps transforms only allow a single task per kind in a group!" + ) + + new_task = deepcopy(task) + new_task["dependencies"] = { + dep.kind if unique_kinds else dep.label: dep.label for dep in group + } + + # Set name and copy attributes from the primary kind. 
+ for kind in kinds: + if kind in group_kinds: + primary_kind = kind + break + else: + raise Exception("Could not detect primary kind!") + + new_task.setdefault("attributes", {})[ + "primary-kind-dependency" + ] = primary_kind + + primary_dep = [dep for dep in group if dep.kind == primary_kind][0] + + if primary_dep.label.startswith(primary_kind): + new_task["name"] = primary_dep.label[len(primary_kind) + 1 :] + else: + new_task["name"] = primary_dep.label + + if copy_attributes: + attrs = new_task.get("attributes", {}) + new_task["attributes"] = primary_dep.attributes.copy() + new_task["attributes"].update(attrs) + + yield new_task diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py index cc2615b702e40..7ba95bc916774 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py @@ -55,6 +55,7 @@ Optional("run-on-projects"): task_description_schema["run-on-projects"], Optional("run-on-tasks-for"): task_description_schema["run-on-tasks-for"], Optional("run-on-git-branches"): task_description_schema["run-on-git-branches"], + Optional("shipping-phase"): task_description_schema["shipping-phase"], Optional("always-target"): task_description_schema["always-target"], Exclusive("optimization", "optimization"): task_description_schema[ "optimization" diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py index 1660d0856a97a..04708daf8191b 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py @@ -60,31 +60,6 @@ def add_cache(job, taskdesc, name, mount_point, skip_untrusted=False): pass -def 
docker_worker_add_workspace_cache(config, job, taskdesc, extra=None): - """Add the workspace cache. - - Args: - config (TransformConfig): Transform configuration object. - job (dict): Task's job description. - taskdesc (dict): Target task description to modify. - extra (str): Optional context passed in that supports extending the cache - key name to avoid undesired conflicts with other caches. - """ - cache_name = "{}-build-{}-{}-workspace".format( - config.params["project"], - taskdesc["attributes"]["build_platform"], - taskdesc["attributes"]["build_type"], - ) - if extra: - cache_name = f"{cache_name}-{extra}" - - mount_point = "{workdir}/workspace".format(**job["run"]) - - # Don't enable the workspace cache when we can't guarantee its - # behavior, like on Try. - add_cache(job, taskdesc, cache_name, mount_point, skip_untrusted=True) - - def add_artifacts(config, job, taskdesc, path): taskdesc["worker"].setdefault("artifacts", []).append( { diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py index a44f30d5bd242..b8657a3d7ec1b 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py @@ -5,17 +5,17 @@ Support for running jobs that are invoked via the `run-task` script. 
""" - +import dataclasses import os -import attr -from voluptuous import Any, Optional, Required +from voluptuous import Any, Extra, Optional, Required from taskgraph.transforms.job import run_job_using from taskgraph.transforms.job.common import support_vcs_checkout from taskgraph.transforms.task import taskref_or_string from taskgraph.util import path, taskcluster from taskgraph.util.schema import Schema +from taskgraph.util.yaml import load_yaml EXEC_COMMANDS = { "bash": ["bash", "-cx"], @@ -49,9 +49,18 @@ # Context to substitute into the command using format string # substitution (e.g {value}). This is useful if certain aspects of the # command need to be generated in transforms. - Optional("command-context"): dict, + Optional("command-context"): { + # If present, loads a set of context variables from an unnested yaml + # file. If a value is present in both the provided file and directly + # in command-context, the latter will take priority. + Optional("from-file"): str, + Extra: object, + }, # What to execute the command with in the event command is a string. Optional("exec-with"): Any(*list(EXEC_COMMANDS)), + # Command used to invoke the `run-task` script. Can be used if the script + # or Python installation is in a non-standard location on the workers. + Optional("run-task-command"): list, # Base work directory used to set up the task. Required("workdir"): str, # Whether to run as root. 
(defaults to False) @@ -68,7 +77,7 @@ def common_setup(config, job, taskdesc, command): raise Exception("Must explicitly specify checkouts with multiple repos.") elif run["checkout"] is not True: repo_configs = { - repo: attr.evolve(repo_configs[repo], **config) + repo: dataclasses.replace(repo_configs[repo], **config) for (repo, config) in run["checkout"].items() } @@ -128,13 +137,32 @@ def script_url(config, script): return f"{tc_url}/api/queue/v1/task/{task_id}/artifacts/public/{script}" +def substitute_command_context(command_context, command): + from_file = command_context.pop("from-file", None) + full_context = {} + if from_file: + full_context = load_yaml(from_file) + else: + full_context = {} + + full_context.update(command_context) + + if isinstance(command, list): + for i in range(len(command)): + command[i] = command[i].format(**full_context) + else: + command = command.format(**full_context) + + return command + + @run_job_using( "docker-worker", "run-task", schema=run_task_schema, defaults=worker_defaults ) def docker_worker_run_task(config, job, taskdesc): run = job["run"] worker = taskdesc["worker"] = job["worker"] - command = ["/usr/local/bin/run-task"] + command = run.pop("run-task-command", ["/usr/local/bin/run-task"]) common_setup(config, job, taskdesc, command) if run.get("cache-dotcache"): @@ -149,9 +177,12 @@ def docker_worker_run_task(config, job, taskdesc): run_command = run["command"] - command_context = run.get("command-context") - if command_context: - run_command = run_command.format(**command_context) + if run.get("command-context"): + run_command = substitute_command_context( + run.get("command-context"), run["command"] + ) + else: + run_command = run["command"] # dict is for the case of `{'task-reference': str}`. 
if isinstance(run_command, str) or isinstance(run_command, dict): @@ -174,12 +205,14 @@ def generic_worker_run_task(config, job, taskdesc): is_mac = worker["os"] == "macosx" is_bitbar = worker["os"] == "linux-bitbar" - if is_win: - command = ["C:/mozilla-build/python3/python3.exe", "run-task"] - elif is_mac: - command = ["/tools/python36/bin/python3", "run-task"] - else: - command = ["./run-task"] + command = run.pop("run-task-command", None) + if not command: + if is_win: + command = ["C:/mozilla-build/python3/python3.exe", "run-task"] + elif is_mac: + command = ["/tools/python36/bin/python3", "run-task"] + else: + command = ["./run-task"] common_setup(config, job, taskdesc, command) @@ -217,10 +250,10 @@ def generic_worker_run_task(config, job, taskdesc): exec_cmd = EXEC_COMMANDS[run.pop("exec-with", "bash")] run_command = exec_cmd + [run_command] - command_context = run.get("command-context") - if command_context: - for i in range(len(run_command)): - run_command[i] = run_command[i].format(**command_context) + if run.get("command-context"): + run_command = substitute_command_context( + run.get("command-context"), run_command + ) if run["run-as-root"]: command.extend(("--user", "root", "--group", "root")) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py index 5d4ee02f4a8c9..c9c09542ff954 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py @@ -5,7 +5,7 @@ Support for running toolchain-building jobs via dedicated scripts """ -from voluptuous import Any, Optional, Required +from voluptuous import ALLOW_EXTRA, Any, Optional, Required import taskgraph from taskgraph.transforms.job import configure_taskdesc_for_run, run_job_using @@ -49,7 +49,8 @@ ): {str: object}, # Base work directory used to set up the task. 
Required("workdir"): str, - } + }, + extra=ALLOW_EXTRA, ) diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/notify.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/notify.py new file mode 100644 index 0000000000000..a61e7999c1d22 --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/notify.py @@ -0,0 +1,195 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +""" +Add notifications to tasks via Taskcluster's notify service. + +See https://docs.taskcluster.net/docs/reference/core/notify/usage for +more information. +""" +from voluptuous import ALLOW_EXTRA, Any, Exclusive, Optional, Required + +from taskgraph.transforms.base import TransformSequence +from taskgraph.util.schema import Schema, optionally_keyed_by, resolve_keyed_by + +_status_type = Any( + "on-completed", + "on-defined", + "on-exception", + "on-failed", + "on-pending", + "on-resolved", + "on-running", +) + +_recipients = [ + { + Required("type"): "email", + Required("address"): optionally_keyed_by("project", "level", str), + Optional("status-type"): _status_type, + }, + { + Required("type"): "matrix-room", + Required("room-id"): str, + Optional("status-type"): _status_type, + }, + { + Required("type"): "pulse", + Required("routing-key"): str, + Optional("status-type"): _status_type, + }, + { + Required("type"): "slack-channel", + Required("channel-id"): str, + Optional("status-type"): _status_type, + }, +] + +_route_keys = { + "email": "address", + "matrix-room": "room-id", + "pulse": "routing-key", + "slack-channel": "channel-id", +} +"""Map each type to its primary key that will be used in the route.""" + +NOTIFY_SCHEMA = Schema( + { + Exclusive("notify", "config"): { + Required("recipients"): [Any(*_recipients)], + Optional("content"): { + Optional("email"): { + 
Optional("subject"): str, + Optional("content"): str, + Optional("link"): { + Required("text"): str, + Required("href"): str, + }, + }, + Optional("matrix"): { + Optional("body"): str, + Optional("formatted-body"): str, + Optional("format"): str, + Optional("msg-type"): str, + }, + Optional("slack"): { + Optional("text"): str, + Optional("blocks"): list, + Optional("attachments"): list, + }, + }, + }, + # Continue supporting the legacy schema for backwards compat. + Exclusive("notifications", "config"): { + Required("emails"): optionally_keyed_by("project", "level", [str]), + Required("subject"): str, + Optional("message"): str, + Optional("status-types"): [_status_type], + }, + }, + extra=ALLOW_EXTRA, +) +"""Notify schema.""" + +transforms = TransformSequence() +transforms.add_validate(NOTIFY_SCHEMA) + + +def _convert_legacy(config, legacy, label): + """Convert the legacy format to the new one.""" + notify = { + "recipients": [], + "content": {"email": {"subject": legacy["subject"]}}, + } + resolve_keyed_by( + legacy, + "emails", + label, + **{ + "level": config.params["level"], + "project": config.params["project"], + }, + ) + + status_types = legacy.get("status-types", ["on-completed"]) + for email in legacy["emails"]: + for status_type in status_types: + notify["recipients"].append( + {"type": "email", "address": email, "status-type": status_type} + ) + + notify["content"]["email"]["content"] = legacy.get("message", legacy["subject"]) + return notify + + +def _convert_content(content): + """Convert the notify content to Taskcluster's format. 
+ + The Taskcluster notification format is described here: + https://docs.taskcluster.net/docs/reference/core/notify/usage + """ + tc = {} + if "email" in content: + tc["email"] = content.pop("email") + + for key, obj in content.items(): + for name in obj.keys(): + tc_name = "".join(part.capitalize() for part in name.split("-")) + tc[f"{key}{tc_name}"] = obj[name] + return tc + + +@transforms.add +def add_notifications(config, tasks): + for task in tasks: + label = "{}-{}".format(config.kind, task["name"]) + if "notifications" in task: + notify = _convert_legacy(config, task.pop("notifications"), label) + else: + notify = task.pop("notify", None) + + if not notify: + yield task + continue + + format_kwargs = dict( + task=task, + config=config.__dict__, + ) + + def substitute(ctx): + """Recursively find all strings in a simple nested dict (no lists), + and format them in-place using `format_kwargs`.""" + for key, val in ctx.items(): + if isinstance(val, str): + ctx[key] = val.format(**format_kwargs) + elif isinstance(val, dict): + ctx[key] = substitute(val) + return ctx + + task.setdefault("routes", []) + for recipient in notify["recipients"]: + type = recipient["type"] + recipient.setdefault("status-type", "on-completed") + substitute(recipient) + + if type == "email": + resolve_keyed_by( + recipient, + "address", + label, + **{ + "level": config.params["level"], + "project": config.params["project"], + }, + ) + + task["routes"].append( + f"notify.{type}.{recipient[_route_keys[type]]}.{recipient['status-type']}" + ) + + if "content" in notify: + task.setdefault("extra", {}).update( + {"notify": _convert_content(substitute(notify["content"]))} + ) + yield task diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py index 8ab3762b8c726..b81e7e5d57e1b 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py +++ 
b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py @@ -14,8 +14,9 @@ import re import time from copy import deepcopy +from dataclasses import dataclass +from typing import Callable -import attr from voluptuous import All, Any, Extra, NotIn, Optional, Required from taskgraph import MAX_DEPENDENCIES @@ -31,7 +32,7 @@ taskref_or_string, validate_schema, ) -from taskgraph.util.treeherder import split_symbol +from taskgraph.util.treeherder import split_symbol, treeherder_defaults from taskgraph.util.workertypes import worker_type_implementation from ..util import docker as dockerutil @@ -95,20 +96,34 @@ def _run_task_suffix(): Optional("extra"): {str: object}, # treeherder-related information; see # https://schemas.taskcluster.net/taskcluster-treeherder/v1/task-treeherder-config.json + # This may be provided in one of two ways: + # 1) A simple `true` will cause taskgraph to generate the required information + # 2) A dictionary with one or more of the required keys. Any key not present + # will use a default as described below. # If not specified, no treeherder extra information or routes will be # added to the task - Optional("treeherder"): { - # either a bare symbol, or "grp(sym)". - "symbol": str, - # the job kind - "kind": Any("build", "test", "other"), - # tier for this task - "tier": int, - # task platform, in the form platform/collection, used to set - # treeherder.machine.platform and treeherder.collection or - # treeherder.labels - "platform": str, - }, + Optional("treeherder"): Any( + True, + { + # either a bare symbol, or "grp(sym)". + # The default symbol is the uppercased first letter of each + # section of the kind (delimited by "-") all smooshed together. + # Eg: "test" becomes "T", "docker-image" becomes "DI", etc. + "symbol": Optional(str), + # the job kind + # If "build" or "test" is found in the kind name, this defaults + # to the appropriate value. 
Otherwise, defaults to "other" + "kind": Optional(Any("build", "test", "other")), + # tier for this task + # Defaults to 1 + "tier": Optional(int), + # task platform, in the form platform/collection, used to set + # treeherder.machine.platform and treeherder.collection or + # treeherder.labels + # Defaults to "default/opt" + "platform": Optional(str), + }, + ), # information for indexing this build so its artifacts can be discovered; # if omitted, the build will not be indexed. Optional("index"): { @@ -142,6 +157,15 @@ def _run_task_suffix(): Optional("run-on-projects"): optionally_keyed_by("build-platform", [str]), Optional("run-on-tasks-for"): [str], Optional("run-on-git-branches"): [str], + # The `shipping_phase` attribute, defaulting to None. This specifies the + # release promotion phase that this task belongs to. + Optional("shipping-phase"): Any( + None, + "build", + "promote", + "push", + "ship", + ), # The `always-target` attribute will cause the task to be included in the # target_task_graph regardless of filtering. Tasks included in this manner # will be candidates for optimization even when `optimize_target_tasks` is @@ -197,14 +221,21 @@ def get_default_priority(graph_config, project): ) +@memoize +def get_default_deadline(graph_config, project): + return evaluate_keyed_by( + graph_config["task-deadline-after"], "Graph Config", {"project": project} + ) + + # define a collection of payload builders, depending on the worker implementation payload_builders = {} -@attr.s(frozen=True) +@dataclass(frozen=True) class PayloadBuilder: - schema = attr.ib(type=Schema) - builder = attr.ib() + schema: Schema + builder: Callable def payload_builder(name, schema): @@ -268,7 +299,6 @@ def verify_index(config, index): Required("loopback-audio"): bool, Required("docker-in-docker"): bool, # (aka 'dind') Required("privileged"): bool, - Required("disable-seccomp"): bool, # Paths to Docker volumes. # # For in-tree Docker images, volumes can be parsed from Dockerfile. 
@@ -407,10 +437,6 @@ def build_docker_worker_payload(config, task, task_def): capabilities["privileged"] = True task_def["scopes"].append("docker-worker:capability:privileged") - if worker.get("disable-seccomp"): - capabilities["disableSeccomp"] = True - task_def["scopes"].append("docker-worker:capability:disableSeccomp") - task_def["payload"] = payload = { "image": image, "env": worker["env"], @@ -598,6 +624,11 @@ def build_docker_worker_payload(config, task, task_def): Required("env"): {str: taskref_or_string}, # the maximum time to run, in seconds Required("max-run-time"): int, + # the exit status code(s) that indicates the task should be retried + Optional("retry-exit-status"): [int], + # the exit status code(s) that indicates the caches used by the task + # should be purged + Optional("purge-caches-exit-status"): [int], # os user groups for test task workers Optional("os-groups"): [str], # feature for test task to run as administarotr @@ -620,6 +651,8 @@ def build_generic_worker_payload(config, task, task_def): on_exit_status = {} if "retry-exit-status" in worker: on_exit_status["retry"] = worker["retry-exit-status"] + if "purge-caches-exit-status" in worker: + on_exit_status["purgeCaches"] = worker["purge-caches-exit-status"] if worker["os"] == "windows": on_exit_status.setdefault("retry", []).extend( [ @@ -836,7 +869,6 @@ def set_defaults(config, tasks): worker.setdefault("loopback-audio", False) worker.setdefault("docker-in-docker", False) worker.setdefault("privileged", False) - worker.setdefault("disable-seccomp", False) worker.setdefault("volumes", []) worker.setdefault("env", {}) if "caches" in worker: @@ -914,6 +946,78 @@ def add_generic_index_routes(config, task): return task +@transforms.add +def process_treeherder_metadata(config, tasks): + for task in tasks: + routes = task.get("routes", []) + extra = task.get("extra", {}) + task_th = task.get("treeherder") + + if task_th: + # This `merged_th` object is just an intermediary that combines + # the 
defaults and whatever is in the task. Ultimately, the task + # transforms this data a bit in the `treeherder` object that is + # eventually set in the task. + merged_th = treeherder_defaults(config.kind, task["label"]) + if isinstance(task_th, dict): + merged_th.update(task_th) + + treeherder = extra.setdefault("treeherder", {}) + extra.setdefault("treeherder-platform", merged_th["platform"]) + + machine_platform, collection = merged_th["platform"].split("/", 1) + treeherder["machine"] = {"platform": machine_platform} + treeherder["collection"] = {collection: True} + + group_names = config.graph_config["treeherder"]["group-names"] + groupSymbol, symbol = split_symbol(merged_th["symbol"]) + if groupSymbol != "?": + treeherder["groupSymbol"] = groupSymbol + if groupSymbol not in group_names: + path = os.path.join(config.path, task.get("task-from", "")) + raise Exception(UNKNOWN_GROUP_NAME.format(groupSymbol, path)) + treeherder["groupName"] = group_names[groupSymbol] + treeherder["symbol"] = symbol + if len(symbol) > 25 or len(groupSymbol) > 25: + raise RuntimeError( + "Treeherder group and symbol names must not be longer than " + "25 characters: {} (see {})".format( + treeherder["symbol"], + TC_TREEHERDER_SCHEMA_URL, + ) + ) + treeherder["jobKind"] = merged_th["kind"] + treeherder["tier"] = merged_th["tier"] + + branch_rev = get_branch_rev(config) + + if config.params["tasks_for"].startswith("github-pull-request"): + # In the past we used `project` for this, but that ends up being + # set to the repository name of the _head_ repo, which is not correct + # (and causes scope issues) if it doesn't match the name of the + # base repo + base_project = config.params["base_repository"].split("/")[-1] + if base_project.endswith(".git"): + base_project = base_project[:-4] + th_project_suffix = "-pr" + else: + base_project = config.params["project"] + th_project_suffix = "" + + routes.append( + "{}.v2.{}.{}.{}".format( + TREEHERDER_ROUTE_ROOT, + base_project + 
th_project_suffix, + branch_rev, + config.params["pushlog_id"], + ) + ) + + task["routes"] = routes + task["extra"] = extra + yield task + + @transforms.add def add_index_routes(config, tasks): for task in tasks: @@ -926,7 +1030,7 @@ def add_index_routes(config, tasks): if rank == "by-tier": # rank is zero for non-tier-1 tasks and based on pushid for others; # this sorts tier-{2,3} builds below tier-1 in the index - tier = task.get("treeherder", {}).get("tier", 3) + tier = task.get("extra", {}).get("treeherder", {}).get("tier", 3) extra_index["rank"] = 0 if tier > 1 else int(config.params["build_date"]) elif rank == "build_date": extra_index["rank"] = int(config.params["build_date"]) @@ -967,64 +1071,17 @@ def build_task(config, tasks): # set up extra extra = task.get("extra", {}) extra["parent"] = os.environ.get("TASK_ID", "") - task_th = task.get("treeherder") - if task_th: - extra.setdefault("treeherder-platform", task_th["platform"]) - treeherder = extra.setdefault("treeherder", {}) - - machine_platform, collection = task_th["platform"].split("/", 1) - treeherder["machine"] = {"platform": machine_platform} - treeherder["collection"] = {collection: True} - - group_names = config.graph_config["treeherder"]["group-names"] - groupSymbol, symbol = split_symbol(task_th["symbol"]) - if groupSymbol != "?": - treeherder["groupSymbol"] = groupSymbol - if groupSymbol not in group_names: - path = os.path.join(config.path, task.get("task-from", "")) - raise Exception(UNKNOWN_GROUP_NAME.format(groupSymbol, path)) - treeherder["groupName"] = group_names[groupSymbol] - treeherder["symbol"] = symbol - if len(symbol) > 25 or len(groupSymbol) > 25: - raise RuntimeError( - "Treeherder group and symbol names must not be longer than " - "25 characters: {} (see {})".format( - task_th["symbol"], - TC_TREEHERDER_SCHEMA_URL, - ) - ) - treeherder["jobKind"] = task_th["kind"] - treeherder["tier"] = task_th["tier"] - - branch_rev = get_branch_rev(config) - - if 
config.params["tasks_for"].startswith("github-pull-request"): - # In the past we used `project` for this, but that ends up being - # set to the repository name of the _head_ repo, which is not correct - # (and causes scope issues) if it doesn't match the name of the - # base repo - base_project = config.params["base_repository"].split("/")[-1] - if base_project.endswith(".git"): - base_project = base_project[:-4] - th_project_suffix = "-pr" - else: - base_project = config.params["project"] - th_project_suffix = "" - - routes.append( - "{}.v2.{}.{}.{}".format( - TREEHERDER_ROUTE_ROOT, - base_project + th_project_suffix, - branch_rev, - config.params["pushlog_id"], - ) - ) if "expires-after" not in task: task["expires-after"] = "28 days" if config.params.is_try() else "1 year" if "deadline-after" not in task: - task["deadline-after"] = "1 day" + if "task-deadline-after" in config.graph_config: + task["deadline-after"] = get_default_deadline( + config.graph_config, config.params["project"] + ) + else: + task["deadline-after"] = "1 day" if "priority" not in task: task["priority"] = get_default_priority( @@ -1062,7 +1119,21 @@ def build_task(config, tasks): if task.get("requires", None): task_def["requires"] = task["requires"] - if task_th: + if task.get("extra", {}).get("treeherder"): + branch_rev = get_branch_rev(config) + if config.params["tasks_for"].startswith("github-pull-request"): + # In the past we used `project` for this, but that ends up being + # set to the repository name of the _head_ repo, which is not correct + # (and causes scope issues) if it doesn't match the name of the + # base repo + base_project = config.params["base_repository"].split("/")[-1] + if base_project.endswith(".git"): + base_project = base_project[:-4] + th_project_suffix = "-pr" + else: + base_project = config.params["project"] + th_project_suffix = "" + # link back to treeherder in description th_push_link = ( "https://treeherder.mozilla.org/#/jobs?repo={}&revision={}".format( @@ 
-1095,6 +1166,14 @@ def build_task(config, tasks): attributes["run_on_git_branches"] = task["run-on-git-branches"] attributes["always_target"] = task["always-target"] + # This logic is here since downstream tasks don't always match their + # upstream dependency's shipping_phase. + # A text_type task['shipping-phase'] takes precedence, then + # an existing attributes['shipping_phase'], then fall back to None. + if task.get("shipping-phase") is not None: + attributes["shipping_phase"] = task["shipping-phase"] + else: + attributes.setdefault("shipping_phase", None) # Set MOZ_AUTOMATION on all jobs. if task["worker"]["implementation"] in ( @@ -1185,12 +1264,17 @@ def check_task_identifiers(config, tasks): def check_task_dependencies(config, tasks): """Ensures that tasks don't have more than 100 dependencies.""" for task in tasks: - if len(task["dependencies"]) > MAX_DEPENDENCIES: + number_of_dependencies = ( + len(task["dependencies"]) + + len(task["if-dependencies"]) + + len(task["soft-dependencies"]) + ) + if number_of_dependencies > MAX_DEPENDENCIES: raise Exception( "task {}/{} has too many dependencies ({} > {})".format( config.kind, task["label"], - len(task["dependencies"]), + number_of_dependencies, MAX_DEPENDENCIES, ) ) @@ -1216,10 +1300,11 @@ def check_caches_are_volumes(task): return raise Exception( - "task %s (image %s) has caches that are not declared as " - "Docker volumes: %s " - "(have you added them as VOLUMEs in the Dockerfile?)" - % (task["label"], task["worker"]["docker-image"], ", ".join(sorted(missing))) + "task {} (image {}) has caches that are not declared as " + "Docker volumes: {} " + "(have you added them as VOLUMEs in the Dockerfile?)".format( + task["label"], task["worker"]["docker-image"], ", ".join(sorted(missing)) + ) ) @@ -1272,17 +1357,17 @@ def check_run_task_caches(config, tasks): if not run_task: raise Exception( - "%s is using a cache (%s) reserved for run-task " + f"{task['label']} is using a cache ({cache}) reserved for run-task 
" "change the task to use run-task or use a different " - "cache name" % (task["label"], cache) + "cache name" ) if not cache.endswith(suffix): raise Exception( - "%s is using a cache (%s) reserved for run-task " + f"{task['label']} is using a cache ({cache}) reserved for run-task " "but the cache name is not dependent on the contents " "of run-task; change the cache name to conform to the " - "naming requirements" % (task["label"], cache) + "naming requirements" ) yield task diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/attributes.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/attributes.py index cf6f11c573a72..74d6996629655 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/attributes.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/attributes.py @@ -7,18 +7,30 @@ def attrmatch(attributes, **kwargs): - """Determine whether the given set of task attributes matches. The - conditions are given as keyword arguments, where each keyword names an - attribute. The keyword value can be a literal, a set, or a callable. A - literal must match the attribute exactly. Given a set, the attribute value - must be in the set. A callable is called with the attribute value. If an - attribute is specified as a keyword argument but not present in the - attributes, the result is False.""" + """Determine whether the given set of task attributes matches. + + The conditions are given as keyword arguments, where each keyword names an + attribute. The keyword value can be a literal, a set, or a callable: + + * A literal must match the attribute exactly. + * Given a set or list, the attribute value must be contained within it. + * A callable is called with the attribute value and returns a boolean. + + If an attribute is specified as a keyword argument but not present in the + task's attributes, the result is False. + + Args: + attributes (dict): The task's attributes object. 
+ kwargs (dict): The conditions the task's attributes must satisfy in + order to match. + Returns: + bool: Whether the task's attributes match the conditions or not. + """ for kwkey, kwval in kwargs.items(): if kwkey not in attributes: return False attval = attributes[kwkey] - if isinstance(kwval, set): + if isinstance(kwval, (set, list)): if attval not in kwval: return False elif callable(kwval): diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/dependencies.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/dependencies.py new file mode 100644 index 0000000000000..7a715f3a26fea --- /dev/null +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/dependencies.py @@ -0,0 +1,89 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from typing import Dict, Iterator, Optional + +from taskgraph.task import Task +from taskgraph.transforms.base import TransformConfig +from taskgraph.util.schema import Schema + +# Define a collection of group_by functions +GROUP_BY_MAP = {} + + +def group_by(name, schema=None): + def wrapper(func): + GROUP_BY_MAP[name] = func + func.schema = schema + return func + + return wrapper + + +@group_by("single") +def group_by_single(config, tasks): + for task in tasks: + yield [task] + + +@group_by("all") +def group_by_all(config, tasks): + return [[task for task in tasks]] + + +@group_by("attribute", schema=Schema(str)) +def group_by_attribute(config, tasks, attr): + groups = {} + for task in tasks: + val = task.attributes.get(attr) + if not val: + continue + groups.setdefault(val, []).append(task) + + return groups.values() + + +def get_dependencies(config: TransformConfig, task: Dict) -> Iterator[Task]: + """Iterate over all dependencies as ``Task`` objects. + + Args: + config (TransformConfig): The ``TransformConfig`` object associated + with the kind. 
+ task (Dict): The task dictionary to retrieve dependencies from. + + Returns: + Iterator[Task]: Returns a generator that iterates over the ``Task`` + objects associated with each dependency. + """ + if "dependencies" not in task: + return [] + + for label, dep in config.kind_dependencies_tasks.items(): + if label in task["dependencies"].values(): + yield dep + + +def get_primary_dependency(config: TransformConfig, task: Dict) -> Optional[Task]: + """Return the ``Task`` object associated with the primary dependency. + + This uses the task's ``primary-kind-dependency`` attribute to find the primary + dependency, or returns ``None`` if the attribute is unset. + + Args: + config (TransformConfig): The ``TransformConfig`` object associated + with the kind. + task (Dict): The task dictionary to retrieve the primary dependency from. + + Returns: + Optional[Task]: The ``Task`` object associated with the + primary dependency or ``None``. + """ + try: + primary_kind = task["attributes"]["primary-kind-dependency"] + except KeyError: + return None + + for dep in get_dependencies(config, task): + if dep.kind == primary_kind: + return dep diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py index bf786e92e4dd6..5d884fc3188a2 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py @@ -19,12 +19,6 @@ def hash_path(path): return hashlib.sha256(fh.read()).hexdigest() -def _find_files(base_path): - for path in Path(base_path).rglob("*"): - if path.is_file(): - yield str(path) - - def hash_paths(base_path, patterns): """ Give a list of path patterns, return a digest of the contents of all @@ -38,8 +32,7 @@ def hash_paths(base_path, patterns): found = set() for pattern in patterns: - files = _find_files(base_path) - matches = [path for path in files if mozpath.match(path, pattern)] + matches = 
_find_matching_files(base_path, pattern) if matches: found.update(matches) else: @@ -52,3 +45,14 @@ def hash_paths(base_path, patterns): ).encode("utf-8") ) return h.hexdigest() + + +@memoize +def _find_matching_files(base_path, pattern): + files = _get_all_files(base_path) + return [path for path in files if mozpath.match(path, pattern)] + + +@memoize +def _get_all_files(base_path): + return [str(path) for path in Path(base_path).rglob("*") if path.is_file()] diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/path.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/path.py index 728b648ac1953..c725140b12c4b 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/path.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/path.py @@ -88,18 +88,13 @@ def basedir(path, bases): if path in bases: return path for b in sorted(bases, reverse=True): - if b == "" or path.startswith(b + "/"): + if not b or path.startswith(b + "/"): return b re_cache = {} -# Python versions < 3.7 return r'\/' for re.escape('/'). 
-if re.escape("/") == "/": - MATCH_STAR_STAR_RE = re.compile(r"(^|/)\\\*\\\*/") - MATCH_STAR_STAR_END_RE = re.compile(r"(^|/)\\\*\\\*$") -else: - MATCH_STAR_STAR_RE = re.compile(r"(^|\\\/)\\\*\\\*\\\/") - MATCH_STAR_STAR_END_RE = re.compile(r"(^|\\\/)\\\*\\\*$") +MATCH_STAR_STAR_RE = re.compile(r"(^|/)\\\*\\\*/") +MATCH_STAR_STAR_END_RE = re.compile(r"(^|/)\\\*\\\*$") def match(path, pattern): diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py index 9d0c032a1bdc7..cff5f286cc784 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py @@ -62,3 +62,23 @@ def inherit_treeherder_from_dep(job, dep_job): # Does not set symbol treeherder.setdefault("kind", "build") return treeherder + + +def treeherder_defaults(kind, label): + defaults = { + # Despite its name, this is expected to be a platform+collection + "platform": "default/opt", + "tier": 1, + } + if "build" in kind: + defaults["kind"] = "build" + elif "test" in kind: + defaults["kind"] = "test" + else: + defaults["kind"] = "other" + + # Takes the uppercased first letter of each part of the kind name, eg: + # apple-banana -> AB + defaults["symbol"] = "".join([c[0] for c in kind.split("-")]).upper() + + return defaults diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py index ba1d909019919..7f7e7cf18aeec 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py @@ -166,7 +166,10 @@ def update(self, ref): @abstractmethod def find_latest_common_revision(self, base_ref_or_rev, head_rev): """Find the latest revision that is common to both the given - ``head_rev`` and ``base_ref_or_rev``""" + ``head_rev`` and ``base_ref_or_rev``. 
+ + If no common revision exists, ``Repository.NULL_REVISION`` will + be returned.""" @abstractmethod def does_revision_exist_locally(self, revision): @@ -225,8 +228,8 @@ def get_url(self, remote="default"): return self.run("path", "-T", "{url}", remote).strip() def get_commit_message(self, revision=None): - revision = revision or self.head_rev - return self.run("log", "-r", ".", "-T", "{desc}") + revision = revision or "." + return self.run("log", "-r", revision, "-T", "{desc}") def _format_diff_filter(self, diff_filter, for_status=False): df = diff_filter.lower() @@ -296,17 +299,18 @@ def update(self, ref): return self.run("update", "--check", ref) def find_latest_common_revision(self, base_ref_or_rev, head_rev): - return self.run( + ancestor = self.run( "log", "-r", f"last(ancestors('{base_ref_or_rev}') and ancestors('{head_rev}'))", "--template", "{node}", ).strip() + return ancestor or self.NULL_REVISION def does_revision_exist_locally(self, revision): try: - return self.run("log", "-r", revision).strip() != "" + return bool(self.run("log", "-r", revision).strip()) except subprocess.CalledProcessError as e: # Error code 255 comes with the message: # "abort: unknown revision $REVISION" @@ -415,8 +419,8 @@ def get_url(self, remote="origin"): return self.run("remote", "get-url", remote).strip() def get_commit_message(self, revision=None): - revision = revision or self.head_rev - return self.run("log", "-n1", "--format=%B") + revision = revision or "HEAD" + return self.run("log", "-n1", "--format=%B", revision) def get_changed_files( self, diff_filter="ADM", mode="unstaged", rev=None, base_rev=None @@ -482,7 +486,10 @@ def update(self, ref): self.run("checkout", ref) def find_latest_common_revision(self, base_ref_or_rev, head_rev): - return self.run("merge-base", base_ref_or_rev, head_rev).strip() + try: + return self.run("merge-base", base_ref_or_rev, head_rev).strip() + except subprocess.CalledProcessError: + return self.NULL_REVISION def 
does_revision_exist_locally(self, revision): try: diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py index 5911914f135aa..e6705c16cf30d 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py @@ -6,8 +6,8 @@ import logging import sys from abc import ABC, abstractmethod - -import attr +from dataclasses import dataclass, field +from typing import Callable, Dict, List, Union from taskgraph.config import GraphConfig from taskgraph.parameters import Parameters @@ -18,16 +18,16 @@ logger = logging.getLogger(__name__) -@attr.s(frozen=True) +@dataclass(frozen=True) class Verification(ABC): - func = attr.ib() + func: Callable @abstractmethod def verify(self, **kwargs) -> None: pass -@attr.s(frozen=True) +@dataclass(frozen=True) class InitialVerification(Verification): """Verification that doesn't depend on any generation state.""" @@ -35,11 +35,11 @@ def verify(self): self.func() -@attr.s(frozen=True) +@dataclass(frozen=True) class GraphVerification(Verification): """Verification for a TaskGraph object.""" - run_on_projects = attr.ib(default=None) + run_on_projects: Union[List, None] = field(default=None) def verify( self, graph: TaskGraph, graph_config: GraphConfig, parameters: Parameters @@ -65,7 +65,7 @@ def verify( ) -@attr.s(frozen=True) +@dataclass(frozen=True) class ParametersVerification(Verification): """Verification for a set of parameters.""" @@ -73,7 +73,7 @@ def verify(self, parameters: Parameters): self.func(parameters) -@attr.s(frozen=True) +@dataclass(frozen=True) class KindsVerification(Verification): """Verification for kinds.""" @@ -81,7 +81,7 @@ def verify(self, kinds: dict): self.func(kinds) -@attr.s(frozen=True) +@dataclass(frozen=True) class VerificationSequence: """ Container for a sequence of verifications over a TaskGraph. 
Each @@ -91,7 +91,7 @@ class VerificationSequence: that was passed for each task. """ - _verifications = attr.ib(factory=dict) + _verifications: Dict = field(default_factory=dict) _verification_types = { "graph": GraphVerification, "initial": InitialVerification, diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/workertypes.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/workertypes.py index d71f7e06a3500..da39654d6b198 100644 --- a/third_party/python/taskcluster_taskgraph/taskgraph/util/workertypes.py +++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/workertypes.py @@ -2,17 +2,16 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. - -import attr +from dataclasses import dataclass from .keyed_by import evaluate_keyed_by from .memoize import memoize -@attr.s +@dataclass class _BuiltinWorkerType: - provisioner = attr.ib(str) - worker_type = attr.ib(str) + provisioner: str + worker_type: str @property def implementation(self): @@ -66,10 +65,14 @@ def get_worker_type(graph_config, alias, level): worker_config["provisioner"], alias, {"level": level}, - ).format(level=level) + ).format( + **{"alias": alias, "level": level, "trust-domain": graph_config["trust-domain"]} + ) worker_type = evaluate_keyed_by( worker_config["worker-type"], alias, {"level": level}, - ).format(level=level, alias=alias) + ).format( + **{"alias": alias, "level": level, "trust-domain": graph_config["trust-domain"]} + ) return provisioner, worker_type diff --git a/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/DESCRIPTION.rst b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000000000..a17ced9af039f --- /dev/null +++ b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/DESCRIPTION.rst @@ -0,0 +1,46 @@ +Text-Unidecode +============== + +.. 
image:: https://travis-ci.org/kmike/text-unidecode.svg?branch=master + :target: https://travis-ci.org/kmike/text-unidecode + :alt: Build Status + +text-unidecode is the most basic port of the +`Text::Unidecode `_ +Perl library. + +There are other Python ports of Text::Unidecode (unidecode_ +and isounidecode_). unidecode_ is GPL; isounidecode_ uses too much memory, +and it didn't support Python 3 when this package was created. + +You can redistribute it and/or modify this port under the terms of either: + +* `Artistic License`_, or +* GPL or GPLv2+ + +If you're OK with GPL-only, use unidecode_ (it has better memory usage and +better transliteration quality). + +``text-unidecode`` supports Python 2.7 and 3.4+. + +.. _unidecode: https://pypi.python.org/pypi/Unidecode/ +.. _isounidecode: https://pypi.python.org/pypi/isounidecode/ +.. _Artistic License: https://opensource.org/licenses/Artistic-Perl-1.0 + +Installation +------------ + +:: + + pip install text-unidecode + +Usage +----- + +:: + + >>> from text_unidecode import unidecode + >>> unidecode(u'какой-то текст') + 'kakoi-to tekst' + + diff --git a/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/LICENSE.txt b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/LICENSE.txt new file mode 100644 index 0000000000000..5ed2d0fda36f8 --- /dev/null +++ b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/LICENSE.txt @@ -0,0 +1,134 @@ +text-unidecode is a free software; you can redistribute +it and/or modify it under the terms of either: + +* GPL or GPLv2+ (see https://www.gnu.org/licenses/license-list.html#GNUGPL), or +* Artistic License - see below: + + + The "Artistic License" + + Preamble + +The intent of this document is to state the conditions under which a +Package may be copied, such that the Copyright Holder maintains some +semblance of artistic control over the development of the package, +while giving the users of the package the right to use and distribute +the Package in a 
more-or-less customary fashion, plus the right to make +reasonable modifications. + +Definitions: + + "Package" refers to the collection of files distributed by the + Copyright Holder, and derivatives of that collection of files + created through textual modification. + + "Standard Version" refers to such a Package if it has not been + modified, or has been modified in accordance with the wishes + of the Copyright Holder as specified below. + + "Copyright Holder" is whoever is named in the copyright or + copyrights for the package. + + "You" is you, if you're thinking about copying or distributing + this Package. + + "Reasonable copying fee" is whatever you can justify on the + basis of media cost, duplication charges, time of people involved, + and so on. (You will not be required to justify it to the + Copyright Holder, but only to the computing community at large + as a market that must bear the fee.) + + "Freely Available" means that no fee is charged for the item + itself, though there may be fees involved in handling the item. + It also means that recipients of the item may redistribute it + under the same conditions they received it. + +1. You may make and give away verbatim copies of the source form of the +Standard Version of this Package without restriction, provided that you +duplicate all of the original copyright notices and associated disclaimers. + +2. You may apply bug fixes, portability fixes and other modifications +derived from the Public Domain or from the Copyright Holder. A Package +modified in such a way shall still be considered the Standard Version. + +3. 
You may otherwise modify your copy of this Package in any way, provided +that you insert a prominent notice in each changed file stating how and +when you changed that file, and provided that you do at least ONE of the +following: + + a) place your modifications in the Public Domain or otherwise make them + Freely Available, such as by posting said modifications to Usenet or + an equivalent medium, or placing the modifications on a major archive + site such as uunet.uu.net, or by allowing the Copyright Holder to include + your modifications in the Standard Version of the Package. + + b) use the modified Package only within your corporation or organization. + + c) rename any non-standard executables so the names do not conflict + with standard executables, which must also be provided, and provide + a separate manual page for each non-standard executable that clearly + documents how it differs from the Standard Version. + + d) make other distribution arrangements with the Copyright Holder. + +4. You may distribute the programs of this Package in object code or +executable form, provided that you do at least ONE of the following: + + a) distribute a Standard Version of the executables and library files, + together with instructions (in the manual page or equivalent) on where + to get the Standard Version. + + b) accompany the distribution with the machine-readable source of + the Package with your modifications. + + c) give non-standard executables non-standard names, and clearly + document the differences in manual pages (or equivalent), together + with instructions on where to get the Standard Version. + + d) make other distribution arrangements with the Copyright Holder. + +5. You may charge a reasonable copying fee for any distribution of this +Package. You may charge any fee you choose for support of this +Package. You may not charge a fee for this Package itself. 
However, +you may distribute this Package in aggregate with other (possibly +commercial) programs as part of a larger (possibly commercial) software +distribution provided that you do not advertise this Package as a +product of your own. You may embed this Package's interpreter within +an executable of yours (by linking); this shall be construed as a mere +form of aggregation, provided that the complete Standard Version of the +interpreter is so embedded. + +6. The scripts and library files supplied as input to or produced as +output from the programs of this Package do not automatically fall +under the copyright of this Package, but belong to whoever generated +them, and may be sold commercially, and may be aggregated with this +Package. If such scripts or library files are aggregated with this +Package via the so-called "undump" or "unexec" methods of producing a +binary executable image, then distribution of such an image shall +neither be construed as a distribution of this Package nor shall it +fall under the restrictions of Paragraphs 3 and 4, provided that you do +not represent such an executable image as a Standard Version of this +Package. + +7. C subroutines (or comparably compiled subroutines in other +languages) supplied by you and linked into this Package in order to +emulate subroutines and variables of the language defined by this +Package shall not be considered part of this Package, but are the +equivalent of input as in Paragraph 6, provided these subroutines do +not change the language in any way that would cause it to fail the +regression tests for the language. + +8. Aggregation of this Package with a commercial distribution is always +permitted provided that the use of this Package is embedded; that is, +when no overt attempt is made to make this Package's interfaces visible +to the end user of the commercial distribution. Such use shall not be +construed as a distribution of this Package. + +9. 
The name of the Copyright Holder may not be used to endorse or promote +products derived from this software without specific prior written permission. + +10. THIS PACKAGE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED +WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + + The End diff --git a/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/METADATA b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/METADATA new file mode 100644 index 0000000000000..23bd3d45bda19 --- /dev/null +++ b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/METADATA @@ -0,0 +1,73 @@ +Metadata-Version: 2.0 +Name: text-unidecode +Version: 1.3 +Summary: The most basic Text::Unidecode port +Home-page: https://github.com/kmike/text-unidecode/ +Author: Mikhail Korobov +Author-email: kmike84@gmail.com +License: Artistic License +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Artistic License +Classifier: License :: OSI Approved :: GNU General Public License (GPL) +Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+) +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Linguistic + +Text-Unidecode +============== + +.. 
image:: https://travis-ci.org/kmike/text-unidecode.svg?branch=master + :target: https://travis-ci.org/kmike/text-unidecode + :alt: Build Status + +text-unidecode is the most basic port of the +`Text::Unidecode `_ +Perl library. + +There are other Python ports of Text::Unidecode (unidecode_ +and isounidecode_). unidecode_ is GPL; isounidecode_ uses too much memory, +and it didn't support Python 3 when this package was created. + +You can redistribute it and/or modify this port under the terms of either: + +* `Artistic License`_, or +* GPL or GPLv2+ + +If you're OK with GPL-only, use unidecode_ (it has better memory usage and +better transliteration quality). + +``text-unidecode`` supports Python 2.7 and 3.4+. + +.. _unidecode: https://pypi.python.org/pypi/Unidecode/ +.. _isounidecode: https://pypi.python.org/pypi/isounidecode/ +.. _Artistic License: https://opensource.org/licenses/Artistic-Perl-1.0 + +Installation +------------ + +:: + + pip install text-unidecode + +Usage +----- + +:: + + >>> from text_unidecode import unidecode + >>> unidecode(u'какой-то текст') + 'kakoi-to tekst' + + diff --git a/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/RECORD b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/RECORD new file mode 100644 index 0000000000000..909fae9e17d9f --- /dev/null +++ b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/RECORD @@ -0,0 +1,9 @@ +text_unidecode/__init__.py,sha256=_hESqlvGR_cTy0oryPuoyrVntCOIID7bHDA-y22I1ig,484 +text_unidecode/data.bin,sha256=eSRmbaTOCtJNS4FCszDm2OiUZc2IOTRyTNnkt9gTDwk,311077 +text_unidecode-1.3.dist-info/DESCRIPTION.rst,sha256=6Fgx54K_UeXRByELmqkfLcHlbXhsvNNJdkPFT0VF0J0,1199 +text_unidecode-1.3.dist-info/LICENSE.txt,sha256=OTjnU1w-TvfmNj3ptpP-O6_jxKXl9JJc1IN1CW_nr9U,6535 +text_unidecode-1.3.dist-info/METADATA,sha256=B9j-1l4-yN9P5e8mpBrXCqAQsRUnA4Izyy0hG7Jyrn4,2422 +text_unidecode-1.3.dist-info/RECORD,, 
+text_unidecode-1.3.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +text_unidecode-1.3.dist-info/metadata.json,sha256=vYPs2_8Q45eS8mrUw0qzf1NeShHDuX_lpyh8S3yqg9U,1299 +text_unidecode-1.3.dist-info/top_level.txt,sha256=SQH9SRjWlLrD-XgHyQOPtDQg_DaBt3Gt6hiMSNwHbuE,15 diff --git a/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/WHEEL b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/WHEEL new file mode 100644 index 0000000000000..8b6dd1b5a884b --- /dev/null +++ b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/metadata.json b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/metadata.json new file mode 100644 index 0000000000000..3d8b506b3d82c --- /dev/null +++ b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Artistic License", "License :: OSI Approved :: GNU General Public License (GPL)", "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Linguistic"], "extensions": {"python.details": {"contacts": [{"email": "kmike84@gmail.com", "name": "Mikhail Korobov", 
"role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "https://github.com/kmike/text-unidecode/"}}}, "generator": "bdist_wheel (0.29.0)", "license": "Artistic License", "metadata_version": "2.0", "name": "text-unidecode", "summary": "The most basic Text::Unidecode port", "version": "1.3"} \ No newline at end of file diff --git a/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/top_level.txt b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/top_level.txt new file mode 100644 index 0000000000000..2f7a53e38ab73 --- /dev/null +++ b/third_party/python/text_unidecode/text_unidecode-1.3.dist-info/top_level.txt @@ -0,0 +1 @@ +text_unidecode diff --git a/third_party/python/text_unidecode/text_unidecode/__init__.py b/third_party/python/text_unidecode/text_unidecode/__init__.py new file mode 100644 index 0000000000000..80282c74a24f3 --- /dev/null +++ b/third_party/python/text_unidecode/text_unidecode/__init__.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals +import os +import pkgutil + +_replaces = pkgutil.get_data(__name__, 'data.bin').decode('utf8').split('\x00') + +def unidecode(txt): + chars = [] + for ch in txt: + codepoint = ord(ch) + + if not codepoint: + chars.append('\x00') + continue + + try: + chars.append(_replaces[codepoint-1]) + except IndexError: + pass + return "".join(chars) diff --git a/third_party/python/text_unidecode/text_unidecode/data.bin b/third_party/python/text_unidecode/text_unidecode/data.bin new file mode 100644 index 0000000000000000000000000000000000000000..523d4898e815eaff4d5a0b16eefb5a0a6835b0bf GIT binary patch literal 311077 zcmeFaiDMndb?(`-&t&#}pW*X+d9)=3^91$LW8X{^#j`nf}-5f1CdI>HnDi&*}e~{_pAk znf~AD)by{W?@j-D`Zv>`PXBf~J$--r!Sven`t-x;jp@zlt?5V8nd$6wZhCuqXL@(~ z@${4Fr_-NJe?I+e`ito=r=L&1nAWEErt{N<>EiVMbZJ_jKA1k7E>Bmcjp^$2(R6M4 zc)C8_m~Kv=OrK7-rrXn<>F)Gb(_c@2GyQV<+v&4ubGkR(pB_vPr_ZNH)7G>-J)WLS 
zPp4eSV`n&1Z)BiX9wttN8{9Dtt8`Hn} z`E=(mQt!r%4EX!^e~^xU_Os+p-{1Q|2K(UQqv`cG9P8=ybuC=@VaC2Y{pDXyXWn`{ zN7#_hpJ%{J|1W0j7svk(GIsu7aO59l^!)!i$0y_eo$2P*w7;LOm;OJ@@E?x<+ZjIp zA2|5kbkF~{avYBTFQ$X7@L(-1%*XjOuzXxdb&iW^^wr~1c)Wb9hvOU{q|J;HRl?@|#n*X~v#&+TU7G1wD77y!3DH>(kx!>E#xGXJZ!cOb7cR?^n~W^7j|f zs0(+e3ooY!FQ+RXPFEgJPajWLKb)>Uo^~E9GT2PFCyf(x8iKtzZ9ZIx3bJRew)4kF z-5}3(j0-_n-@25aTbtEj7V+HHESH%UY4^~P(;6+!wdYStKTY2U zQI!YxrZ1kSUu1D5ZS6;1fA#af@pvy|{-rITRA1yj{Zo6>|Mx;MPyiVGBL5$b|Bo}k z{znp9`M<5YBFOx|o1xzlFQS0R^I|A`KP@l+zn;F(yT>_hgi-+IlR(NZBRcfzrFwNY z_RatE>HWP>3ZMZ;S3)cIBh&|XNh9)uG;@CX{Oa^{e|q|Ix^-*%o4=7b2z^mC>6))z|UJj#2*F^|2k)X7UZQgd9Nq$(_H@|xxY>;*Mp#!GU)x_ z%Fk!;`!rzuy#<$NZvP&ir{)&_Sf9TK4c{0?XxWS+Y)7((s>fDjy%zy~86Lda!Rhen z)qBTo{QTI}Ty%=Y{(0nmF(ARck4!(w=LgAoFD%Ew-OUjI|6t+YF7KzrwFreeb#m9X zrpsH?H8MVkf(l+M>E`xy z0112H>ElH^b9=w#^VIag!|8+F>B8gb(&KO?c=y3{XFJX!fETE{nT{_%mID_8ld$m9 znTcEBBW`jzNSH%}_hH`6{ptJ`)Zf?&ow3c+$mMYQ0F<~Fu@5!|fy198HqK0hg`YFv z7-<>^Df>2r9Yz+1)Ar`Hos-?^L6GG!=dtMR=>?%j&^kDMkV;RIla?NYet-_(%b-*E z=#2Z}LRyL@JlRL=ks2-sP~$kv{~0DfI+%73l+SBte)OXsy{3`DXfEIM&TD7*?D;H$ z?_u_*r@hnDrPI@+W#EsL_ z{^{xF>FMBfv?_R*$cMZ%lj9~Bm*afS@8_65QKQHwoSpA{Hr>@5w*Q&#_Bvm+#(c#m z(!`^zJ;c+MwV*!#+aBIFnib)^>bGWr%zlf{<2}Iq<=&RgPkmd)zgPE#?sG<;|ei&Hc{;9oIgaE`OE+?=RT!vlz@< z321&8SM#QKkAqlX>@%XE?Kq^G+mq-<#Pvw^coq@tV1DuH>GU~HF}4^3J^;Vd=~vV9 z&!#7zrNHOY!>{b&%{aUzSsgq7hF7BCv46x2n`v%4pPX?{p~H+M|9+L@&!%H-ep_Gj z{ejwBq57qR%%BK{nV^8(8DO0C^ZdtL_5;4bae~LWf#uInr`;E!jc{Q3_HQ}=-SzdG zKJd46u^{Kq7iZL{vkVtcPhW+BFLK2$Oq)2NIh!-z`_#gSf4x2Ds0$(oFw$f0;4IS$~}WVb_zmz02V6<#@46u}S78mtwU2e>HddU$fnM5qnw>$8Rx- z%HQGi!oOQ-76)@jZoNPV&#@qs342mIJtgY>SjX_IM8xw*{yBXbrtHL!?nM)xWXN9zT|P(SqL@#kD^be* zFgi1jFc>I}W=4g#bmS=$0|Z+iB>!c276kqxG6`=J>r4;PoZVg6`yxEu-Td7!@ShW- z{|I9dk5>sW5pJ%M6%ST=@Gs5`3{%$i#ly*L>$DFO2VI#SUe%RvX3^#<4rkgtj8{wi zlV!Xu!k_#2ugBBlTr5Jd1X_ewJ1iu|qZwHyz|M8TYa+D-Xv`_HthX1RnEy|o#!uq+ z2s5@8fdSE@e(#8+>(@`DX;EeElZ{kjh3Y-f9M@rUr624Cej_nt0V=jtS;bN`!c=Pt)L z2|U)|TddJ(PUXg1 
zXWpGoB1h+D#??jkE9yRXJAWqDT9(8gOBDBp>eCIHFF7hdcaCd6ca8~SrfvQ_iN?kF z=0d;e^T;S74GMn#8>^or5|nzHF1>m2&5wR}8~F`30X2aiKafNKi1R;|9E70FU=WQS z7sPi(p%Y14@DC)S#P547VZw3~Cy8J=|Mxhrar_2RTv-g_xJ~>4-T*RBMyFvCCnD1~ z+`;l_ze(GI?B!z1=dYhI3q1jrmaA}nk_>)pU#~WD()G7Eh~=W0`Yi?;hdj(-xrAPs z8{G}4Up`g<$9(L@aL&v3mL2r$p~-*6IOZzf3R5T1d#v-hg;zhn=`da`@om>9iRbk* zKZ>xkKHoGS-&XrK-2aY-a?+50&!7onUNw^jO-qjByNDzUT#YeKBJMZx;v^e6iAN2j zUd=g47O$51eXqa8T8LX|@}!ae^A!UzG^73p6T@;y^(|rTKd2nOZ4xnS*D-`zj<<`a z%x(nJ-)IbTW4|3f{=ww1p!b+LWDR*UM^<1LvpFbYt~1-kJm7gud>*rpomb~~kK;>r zUSwCbOYTSWyq%q#FHiPpYv;i+=z4a~DIYo2RL+)XeNo`!(dS}hl;nsAkB^I+hll26 zUN}7CJg#crT=x(8tSnExd8G2+p|uv4q3jpTfAEwIiKV}6Qs?Xi`Mm|t!6C; zWo{T}or@9lB<2fSF-Vn=5yX89zcT4$UmiQ^RybqucswT(=*Mf4+{yxPt& z0RMX%&(d&sEUrVkJ)Qcq+kb`xKT?jnq<_8p*Ga$s(GO02ljeU4I{JsuaOyrg7S9gw{@uVbHwLmT` zk>?ILOCrrV-A=7-dfZMI+ulgl_8UIZN@Jho-Y4n#lTS|MnR@bQKdO>vGFKlS={+CAKA4_9pPsTti_EJ+3E-AB=uw`gh|F^rElOK=r=2|7^JqGttvt%TGd)ea z3E+aLPoL3xZl?wiR*1d00nkdexX=`)ZI-It)bCR=6L$~tR z=iHudPEQY~r+FYRSx=MoG=GT}qflE^qaGElX;tc`^43<0Jj>tV^lUpPIZN$l`P-eI zC2RjB$6=0b&bdf_k{r7^Qe-cGhtuA6PI8vqz5MM?d(_Tf>ZU3+shGc1BP(R>hphdO zm2)o04_W&mYag<{$lu}g#dc0|mfSD$*YSRlY>erP*hbEi{T2B!mKkHdNc&%;{V&pf z&bgpO+W%tk>7t=~LCM4E-u84a`S;Ssz5MM?_fq-G{N?LO+d0Wu zvcAmU?(}7{4)S-%gVqPhI!M+*{&uH>WZlnS9%$drNzRgWKYw{tGFe~e?{NBhJ103y z*4O##h`&xY#C{!GbDnG{O|l?#XLH&)oOZUSo#gH$b0>eh(@rYx#PoK8Av-~koO3~b zjCv>VvGXEDPUo+ajMK?Ko$S;3+s$)x2}ARjecX75WIajNll+yeCxfgfAuH#}hO8tD zvYup6Pco_}8CK4@phQOYWDM=eJhq%r#%3H|&W{>OPhSNSw*!$mOB>1is#JWoIem6G zeYQP)maNZ`^;!O!75$C+Mtf0SbT_IS%|&rvMnzsmMP5cla?S<$QIVHXk(a0kAQ?;x z04E!sg=OIwf_ShwJ;+ny`Oa3d9wh5Q{_<#fYCRy2OmfXOp8}Mi$m9GSPLK2Cd1^mS z?Z^4sogOFaQT`66N835cS+XAGuYEpBHhn(|7jm9#_>g45?MKw6I`wV97HDF$agb3T zWYh;4bQwl=ZGJP^N2&q(};KkeM1~V5G2k7pxI1F7W4?51V3{7%RqJDGYdyw z90p7ecXI@M53^twXczZ`^8xlC{+HNJ05bLvRE&)TCb5ecOtM14fk$%V&<;Y$K}b0W zE!C*pSEJA&v@2f^1rJ1q#7lvQJR^&>YPSR`(~B=pRDo|{eAlR05GCHqf~b>e9;0M| z)WtHoYmb4Wl<-LB{4E`XbA_v@DE68b^Bi)*#)Otk@^^B%BW+(Ln*4dfnuk6Sy_<)k z^Y}{?8Y0l~bb@lHbq 
zu-rj#InrT|+)9Tz7D>vd!yJo*@*c;MtO}&j8ma={7jmQgL#rRJW$>o>9t34 z(`$}Jl7i_q$0DJ;$FU?g(@_>zw{z^~h$7>E=@^%s7V*Vt#v{;|j_;@A9?4C|ITi_y z+hdVX-s4yjWYv)9h)63f5Mrexk62kcdX|oQBsU%9SR}+_9*czX9>81?Nb2JvjF`mKZE29!@MtJA{{M2WUt;9)XLr^EB=BNN&i^u}BC)Jr)V& zJ?4a9UWA|%g2mJZD$)@p$OZ72Uld{gR%Uk; zLqBsP38HHdn#Q;2A=sFMgd7MVI-C;YK;dL%b}ykpf@Di_pbWXnP7dPg511 zlnGJ(giNuN1W!3gVlR)fJCH*g)ZeT5NSe#t&L|#0KFMpIyABD&qBnbu?X{KorGCI&P zr|r1QG>ki|KTA`&hxVf8PKQu%1G=>XYo6PW;yDw zj6FhJ$jVK_+{}nGKM2K7LU9g~3@j8sVcvl!V64tO7%}saCx#u7Oc;_1PJ5ii1|FHf zHfD(t#QVoMh};_xH1JOBooJh|J4d2$0`aUk=pFo*leAQX!Hm{#YANE5u|-hD)W2%0wGgMQypAUJGa=h)TZ(=E)oJdg6R$0C8)n zG$sJ?dJ# E!XN;;q!U-fBc>w-Z=@p%sy6OEo5~sph0TX;9x1gB^?4PQ@75>?K}e z@#i41(id@XuI9W9I`MVCEI7qpvH};|^GniSC#2!)_RBqdo*m>U#GY%FKnuGpv9bHz z(oCS|7q0k1PFC}B*hQ9~5~m3-N%`(yg0ytU?-OK|EOB;}JR))7Db@J0PL`Hfi^~1y zDTzDIVpaYFAS@#$n%vyWH;DHVB#*y+2@7qxmqpb#Gd0QV;>XjOpG;?ddTbcWL;4Qq z|1>vUzXRca$e>qsmnOa^ZS#$1@MFY2bN|gJ@V~j^bnNkqD5-OydEe;?PYfJp#^ANt;*8`G3Uo>{|XJ$D42FsgG5$aq*N>*;CyWeVo|k9=XHg`?X{ zDf!|?i1Ujg_wxf%lnng)joA*?{RywfKft3#YbI^gctj4~VCF=P&pViw7St5S*xR@2? zSC9T|dw$nEOUbVuA7n9@<8qdS=i^Eiz?YAWEQ!y@)vT2-A0K7ad-=GQMRbnqS)*P) zZe&UQ)#GN?k(Un^!CyT-&E_HpzsCIPaXTA&9Cxz%?2pN3C7Msxr};`$wr%}?gP-z) zfxpY~DidG5o*B11`0sN5>HwFo8OZYSEsDz${_+3)$;YGgVYFcWKR3b;a?k(gdvlrj zbIzYXv!itEKR-3S{C}6d-SK~JllK73|L1#$k?it+?)O^>$J4p3`Ezb({$$iMtx!fe z+U0ZZ>o{i+`tkpB{rPhq>-;(QJ%7%9&!0!_(<4_$?Vk=!r}O>B@BMm^NfA1l8^1W6 z&aYzS$FY9B;Ok5`A)LEms3 zKexb-r)$@y@5gohU@^FPkbmEw9mf&d@Z$MTrt?>( z^Lx|#?+1&YZ9fg_UdujUese4Drr@W6FMe`8h<$Uq@a&U56(*KwA75hmwP z6F>01yYJ`6n13*R|NFoC!4IO^zbglV6ETY(ylsO69Qk$r&-aTfEkkEFZf zPA!7WIoO;(`O(FH%>OeTgu;Iq9R%jL4XKZU@;Na6RbFrM-TqVey9(gDwD`*e`uzXP zrTN>K&yCCRd3L^@Pu?E+A7{ta z_&hf*$7lK+t&Wp-dfAV&<7#}K8<*oVBcJQ`j%MS4g-*!W9y(kZ^^BD?NVr166%wwHaD{{`B>ZQCgotu={LAIR zKR%qVz~tX^F!^2J;~!t$Rurt_mlfpvdjvVZ3%vXxW&aRhSnij{BKw2T-<$sG!w+u! 
zO&)`MD~~YdOXsJipC!#lzG8lA`ghacwJ+tUw!|@3GcD$v#GycJe!|FzLE2FUU2m~Z@7A$S6nT}dXrcDWzDcYaR9!o z&x@q;28O&-B0tWYS54#<4ZO^OH}>Rx8F>Q$FCoZ#3i679yt*K79^lmlypterE8s_& z^IOe%Q$b#Ckhcir9R^AB@`Ai~An!8B`yBF)i@Y`!PEbF`Hg@Z|*( zc@srm#+TPh7d-+k7jp_Y-!TV-@2;jA&i6X*u21LJryJ|}L5REg4)~4y9>que z;Kcg0_Q7=VgS-HI`skBs?do*#YSK&j$Z+jJ2e(z?@>n@JoF6v7~^F~o#A)42Q=AEH^TWH=C>g#Ir zqSEoA+Ps`LZ>D|qwX}I5?c;DMJRI)|9WRyjt*v>f$^7otybS1-x3K0ts=iHi*56ux z)Av4^?tU?MsnM&eyoHwlRH_zlb2W z@(T|UFM>Tu1y=VKbLJrRqx@*WPL2mT_HsPSaWBVyj{H5&aX-hC90xg`=h({eXfw$U z$pex-l4m6MNcKrM9+TWBc|vkP@|+~woO*5Njv8;7JHZtw7M(~Ik7+}-lx&aNLfwyeCdz4OR|UY*}u zE_r3shA;Hyy47yC&kwgZZU5tzH@&@W8y}7fit9E1!8P$8Q?SbxcKL%F5y3xQ!EPw2 ztCi%{p%va6I&!B8BfcMQD!XpGkpNnEZtU!^CTw_mj7KxRaMC?&b~F zPd2yTNDucn_crsErp>3jdwHYFqs^yJQ{&O*v(WWun^)8xZtpzI`wKCbRxHS9=XGr#?A(8IdcHR8 ztd$GV;OW`g^dx`pO&jk`m)=7yrza%O-=B8gZ?X3kn0!;q@_zRx`4z&K(~}$HJ$6_4(a5rw8HG3tp_a zek~?HU4HZOo73fu>B+(LH18aWrtChNo_=jeo8S9-wi{-yPtR%V+O>`8+VyFFZ@Q76 zLcF&S-kAr}*$30P2h-aRrgt7p?>?A*{9yXYgXyOaqAxj;eJHQs_)d zok_7XDR(yI&eBfGolUv3DR(yI&ZgYilslVpXH)K6%AHF&A#IiD)>g@CS#xC&32|0< zu;RfA3RY&Yf`Sziu8?qrgexRmA>j%MS4g-*!W9y(kZ^^BD?NVr166%wwH zaD{{`BwQik3JF(8xI)4e60VSNg@h|4Tp{5K30FwCLc$diu8?qrgexRmA>j%MS4g-* z!W9y(kZ^^BD?NVr166%wwHaD{{`BwQik3JF(8xI)4e60VSNg@h|4Tp{5K z30FwCLc$diu8?qrgexRmA>j%MS4g-*!W9y(kZ^^BD?NVr166%wwHaD{{` zBwQik3JF(8xI)4e60VSNg@h|4Tp{5K30FwCLc$diu8?qrgexRmA>j%MS4g-*!W9y( zkZ^^BD?NVr166%wwHaD{{`BwQik3JL!SAt9u!j^E6KQ`4R8Q`3d*okypp z%bUBWrjK)ZV{4P^+uNJNqBoLpw>NKW?i?@hVJf_Ln09h(Y?FIwGq=uf?dC?)yIY6c z|9Jc1sp*>S&&kKzDfFJb-$~~AUHZLXdc#H@dV^9Hy^;<0{O!v98NmnVcq=dFxMfR zx(tKTAEFVLAI=#z|52_sXyrd!0kha7%JY+RaA;bP>$TWv715{bDXyq+cD!j z{ardFQ3f`tBpB0&G#QC!=o@JYjSC|oOgc?2?!k@Zkb88bQ@8~O;Bke9iruiYhsnQ0 zf~MStVO5!a!hoA-TjZb4E}Py=$?yX)FnZ{#;;c*2Mv6PG!RYpQb0~`~9Lqs2AER>b zrA1X+GE_E{JW@gx)w}CZY64|fLP!aN2AN;h$@fxeShP9XLJcUD zGsv3a`{`1Pzk)s8c#&G?rTy;Q6>Jy{A!o)kr#B&ZU30r>Dp}a!5HIvz;81;v0=Wzu zQxRq&*|aYoDGXCLmIe(a3kJc0|JiEEvnWA5pb7&@A zp66s>lfqC57GO1$wVWT&J(#D|I?CHnETKoxRsGb)uBIO}?@~UmraKN5@IB?3yRpKs 
zILyuYjaXgS%~{I5R};*c_yEGIiTQ*L!$iZl)LADHE*!oHiua(c_fk0YBm105CfZ?{ zyKqF_RMa=ku*Hm@nrd4JE6*SR<+a{rXuc}_RYXVUxas$f%6TgisshQD%d_4o`E^lt zJ&j+1zG1m_EwJT1G#D9g*o%AsiE2?OL{K3KqY&Ocw)dMVQ&EI5%Qhe|f0@Z6s8UGOWT?nDQ%Nbb*g1pmAcvUYI>QW|cC#?{!+OU>@jjTh;huhv99;|IB zyyBJ6NIy8)I;dA!g!;x!p<_9FPj$OF+Ym1Ae5ziAs(iwa5C+FC=TuY0Le!hwxKf+U zSqC9orEr~Mp*=N+$tn37i0hjg?=IFK9q!uP?OVjg}C9T0-(T0$_O)*8s6UR8VVLWig+ z5J{1NkR+Wiqht;C&`8SGqur^guuMem3lJ5=gJ{Q#gsSe>#F3(*I)N|%U$|pDP__%> z`t3n%1Gy4{iCnIJX<~QBxq?iun7%3>vj`=k%oWaVSn!H8DC35qIJyAbtRkpb?wltn z!{YpMj{DX;4RoTdQIAT|)1lU_VX%6&)YZd8NHC;Um6oIm;S7%qeK5iM*JP`WRHWtD zt(3V4E9Ghu&;#{`*WxIxXyT1RssSk&aHF6i#+A=nXcuB`>BeS~pX`u=>Y+6bx=e2L z57;3%2@yxo`3nvgP^f8tl{X`Vs#gCOFg~;U+b0d05jz0 z)VM~3Y@aGb@?mfYyQ)Cw_zL!^b+z%k5Lc$wD5{)mxtp5QKsl;;=NaR0`}nVnvu2_R ztByKdB%8ddrlLhD7(7gOe9lmEFcQs zMIZ#Ac1cGI$yzTx<{svLLA@n+Xgn&Fhzw25+2BBR(kLRQb_OG?tKyVerFl`~vWz1b znvxdhm`Blo-=kK?5^o+>M``s{+GpJXF;0!I?r|;GH^hOQe8@=a<2ne;A%rd@b6bAZ zFLoH}!_rt3o0@Z^GizxT&Av%L^%H46=kZm=HK{}gA~pGdTdT|cxb7A78*IMx71$jV}C%T1T$8JG=^SL_!x(mdhEWCk3aQX>W|5=FQdLhl zb*1&Kb(V!0%mwFPWW!fod;S&bS3Ih6-7uz@ahcCbW`tZ0B2tQ~KwiProAJ3J(JYXA z4$*R|YKEM}P0QC7j4VZ797g$099n!fHq|%in3ik>E1{fB?@+77X;#}wYlAq!$Eg0w z=Ck@Do1}UfQ)JRqI*M9rGg%3U0kzSLhU;Mf7<*%YEVQ&`L7?qbzqFj3GI$DA;3LEd z1?UliVJ;PHOo_txtQc~&INaG}r&jG^#5uGptAux0gF>CRYUdbfStCTt>tV&W)N{SW zq_z*>V$%y46-%3FxM+|Tdvo(i8Y!zbcAw;Ks^VdLtFdg)FM~Wo!9ADoLU+qCwHkI= zsDm$SY2U6OrbeZAs*A#$3=qo~Po18dq@Lps0Wdg{f-f|rD%Cj8!*6?VdOo_*!lTO} zSX)fBt|g&Th+Htl`dBNe#w@zaT^nRToqix4)nYLzMwir>_h`wu!bCttbajj7&elgA zZOD};2Rrw9S?o%GF8Y8_Be;bA0!Uh^e1JInGDJdRo+uQn!vmtnp?{#4Ju=i9)OjKi z7wl;43Vo>A?PIWgm^)*)JfosnCl}XfwF5)-=W&2Y<)$+RysZ+Uzokf|vG{CF^~$0! 
z>~_*Tg4q^vRE;Qc5E6!#9xKFT#AsDU%mjU?3n`&DJ;(bol=tU2DuOhg5MR<^NVSKs z1oBZA)yT$6d7~wWB`^@hmBCy)H|N2@zWO`I1vi>iT_#&mm&Egu$nrZRrn8>7IBAy4 zGxZT^rVg{KvDL#gcUc=3A8mONv<2|fo~natM?=#OWY|xDdvrVW#>=t*DrHaI%&l}B zJWO%1e?X}XYgkNCMfIvGdfz!ZH5v}5*G{=dbS*g7cTmZBL}gdCL2Bw;U4&Cn7CaSp z>kY_GK_~mrY&emwY%cf^LWa(`ti$r64LcSoMn37W29e^GXf3dVS8gbUfLa>6oPs_W z)73Wtn09P3tDUp{#z<{d#2B&w%UrqlrapBM5pC$_uWr7;!qT*f<+^lh2r=-tlL+X! ziIgx6BJs5}e~lX$PnGCGL^<{f6aiB_wcQEARSG+=$YAA0DylJ7^IovHeBxuM0uEM( ziJo<3M*9%N6WSe>6ig`YywE}>hK@4R%P~C>q;6;kb#SRzw^K{x9P(niIY9&3Ibm>B zJ}1V0#k;F*Foo}?=1>hdZui=nT7gFu&}5C`Exr*zQMIsOhD2+UE>3xgR)K|y84w){ zpo3GTj~mR_2Ic6A!8a%LEgmu9w+9W)#98wi>>TmF25)R;FkyamOU2gp$-XMPr?!d7 z#O`5Jlm!-0;UW#wJVdBq_1lt~%z1kQ>Ql{3fD63pkru;N@^f1R?S@txO9sUXfVpTM zPKH*j=)zF@x$A5YSEAMoS4JCS94eM#r@F|U!N(|vb8}QlH1M^wqi$J&h>A+UaK>vW z-jQSkmS`uGn`3vyMRVi$BmtE38FaIuWAp;4x}=a`F6|#r*3z7-28uhKm+xMoEYKp( zG^URpqyl?6tUO$Y6$;W=v^rQd$G26rD%g}ZT5&<;5>{Q+pxgyK$8~ut1PJuRvfxJp z9`#QsF;jUJ86(m(21R2}Ic<#zYbcdC2je2_wG>s`^ailfwFLpDyH_-Z;#EC{Q%IA} zEaZlT0klr4yNHz`7K3j|5WFl+;`Lg5;)bqYUMz#~+mC1flnS%|(J zHHBb_8cTmL)H!gCqhY_Y179;f2uknKiIR}LD=`II4(xsDzEn~L|0KhKL&@|kzIV{YC z;36$7>us4~z(v^Ywtx43a;05hNu>e#59i~`N|`~VrKgMFI0Ia?@^+sdYh-R)+#hef2AbHPi_5YUo?-m(U(DKoY98RI&_tBW5BI}}azi#rh>eU6dw2F#_DF2f zbpqoK?2C@-wMGGZCReVDXhUH5y=&KyP5~UAhZa>I1_h%avQarcj}=Dab|?nP&)*k zp&iwpJ_?mGS8^~JZ)~M=y~eFAbSKA%8mbg_=Nv#Bbp#!!7w zxe2JsQmWr*S29y(tzL$XN6kGUqiW4cxW|}ERWe| zm|>BdsiaYDWMsyCdJts7(={DNm7k?#W42q#fIW?n(b(#gJ`yOw1(Zq@rgL2?;Z0;o zV=gj<5xbNMkwA_zi1XVSyIUJq!w*^p5EE6^Xq2Lo73a~z$YX$~q6D;UykqN)~j z>^+(Da;$Q45^tdqD5ro#V&qlnF3~7~OCbfQYzE4uLTM-;Ef|A+4YWad2t6<4)oIUMFi}WdwyEP0cgllk<02en%_2%iy|Rf z2>Vi`N{h;=Ml!?%Zn}tYF-;mzqREviEoyZ0iWg5k6sU3{z8!5wryWK}7vPlrS9I{G zi@7pWTV~0?Nt7BQ0jdcxCFE;fM#yk^>P4gUJZ64w%z$ zAA55YmcrDBLTLmh=Tpn+Ds3SO09mgtrz8SZLxNykNG-UgsLO`yDp)tbsu&cNvmh0$ zZrp;J$YG?(5KSQ0RvnRgl3I>~*u{1rI;Y}W7oIvFz5?wz+HHjpB^s0tCM~<77G%hf 
z)@GAB6gq(1)b3!^$!@R$1xh(&5MHOv6on|6)bd9>6_k&(&|n62iyY@x%vA^EuG6%>fYIWma7r=59j-K^?Uu%qb(alC)OXbi}NSmlAdp|P>xw8N8)b+c(pgJ9{JfMM*r<_v0d2?qZ2Miu1cK+o+y+LXqx zq-^a$$9@K^CDmMX=d+dp>(i}aHIjcdExMZTv zMBZos1GGA8T8^hWYP9KcQKYnBNT(DRF&;#NKpI*hi73)oJDP)bgn$q)Z9WO{E(@%s zN=F8E!W+^)F=WsVCqc80iA=m>Id>jXI*nUPk$U|ujyvVA@6uNq!Js_dpK+XxS%*Uw zopmtv39QdJoo8`il)1F~EO)tKq-VpT&#WJe%3;tGph8)UwL23|MbW3xp3BxQ>;X7f zCmd43BP8p3giAbXE$s5~Hy0Tj9BD-AlhTA)UU4D|(Uw?5s$$K?%XEr{gIf)C7Z$?p za4V%%9$C^|LYI+H+o=BF`rozl`9LPFEVepN6n!YSUg@GWP_eSAIXRC=B#3IDC{(|lVTf%$2bVQ; z1mL(Qg|+!&Q9IQb2qYUQM72{^wWjV^ngGK43?Nj%;k(jJKTw(~=x9EaVI52|pxlC> zQYY}#Ths=q6ElE-O-TMBaBNSL%*UcRdVS2>vQSo@>d~s2;apgvsh-kh!&|( zs5N>?pE)t`X>iQ|eDJ$G6><#os>HIZGkQXvwnH7!)1@Uzg|4CS1|)?-vryrI+U_Hz z;ZUuLHz;jSYq=RUn$g$Tl6#mYpQH7Tr_LB%8nJ z@Q0@Y)W}ddL{f#?wL3!&$LdPM-0f8s<7}TLET@a|0yY3yy59=8?xk@YtR>qR6(TTq zLX@s+r1gxq0WNwh-&$X#Y>20UUc?Gg5$Hub=(rnDJ_wO)Kn!~)^c}&N56C#j!9l>N z5z9k?4J2{hX)M^vaJ`j*1E|1sxgoAd=h`5*T?ugMKq<(~@$5=rSs#d04eBW$VP0my z4JV^lIoHOa4mF!obiI+NW2X_05o0UEF&F4OjZj_TbR*8dS3!F7XlF#lE7yb(owKs3 zsmc@8t*GA9G_EVhJ6wn6Qr_>ZcIH*eGREX~L8D&PAr^JvzH6lDgv|rt{T_^JFifY+ zE624K^v6e?9g)x3j(B{`hr^kGTft}6*XVU1^XG1ue7-S{1mI9@>kRxkVQp*;S8dtky z08Ij|xL1#1rv;Pbc6rPuZe>`4Ka9uGs9x{Q19ik2;azUY7-pGjr=*v%J_C+(=z0N& zEOiS0tp}Vn z8%;p6;f6mc0E(j_g)JopEkjK@%%(V(2(xJlsK@nP!*PaCl{24aA|zE%U|ax7wZ0uH zBVD{ndx9YYBKrT!U+4G!WN; z4|>N;gznnOT$azYS!`qW?3AKbLFuRo`oLK-HCs13y5r&K`W=|$Qo@IsZh#T6-1wf` z5lY4Ckg)3O83h)V zI!hZ%HNa6iONt=7JvM()hPxVn9hDOrAY9>Yqqzdw z3Rwk_aHuv6JBEk&>I1_kKhi2-SE!J3*v<1e(5 z;Q?7@Jo$lpzi84D>Uq=76x#UQG6 z25S`4DV{Klxi3~|22}(`*rK!GwJ^3A+Xjpw?;NrnIVLeYu{jp2QqgF(^V!AYyO$65 zG92dFPTiO(TrBWuwJ!Z5dittu!c8HdEg>}Kx+){3)Ntq&{$~G<5L&@}t+7w?gvH@* zvXx-?5fa8+zpk;jLRJ1y1D4g;#fjunR5wy+YHBcRdvLJ=tgBOcRj1*DaG>3%C~VT8 z^u#r{vxq(7bsf78{UMb$s=GM0H#FcC*By!I3ac7AesoN^IpZ?nK}uj`taHPIS@WEw zH4z`QG0|u+$F+0mtgtnfkyjow8iM&oY6HDsUIX9s+eXbPR6vYvYF(^B4*v?{N2@TT ztajeyQaLb!CMa`Np-;0yLd2#6D--DeM9_;0ny3axomMB5NGaV6S4(*;8Y22L09vEe 
z+n9;uHCwkTQjBleZa&-G-`d^@clCobbot>|+)Ky8A*!Hz0`kUQjSw9m;A=p}nZ+%YIci*A0@Y zE1sZr$9&KRA*>dUA8A`?kjSJBy9bgy?nZ}dbnvW!PjV0y^GrtSpyVcAr($<_#YP^i z5j^_6g^*-f1qxWGEX$i6m0m-1`T@EtgWeNXkMvH8i;F20diqtxB3RpMYPUy9>sg13 zmdC~MA*055gYsd94d;dC(al(&3pW}|>y)aGIwg(!^1~-p*K1#87$&KnQE9;@FcwY| z2I}GZKnX(`>Oii-wfnBuUEA8s&PH#5cusx7>dtv-D*aVe6Cco{6K3Z?!h|lF=__BZc*n)bfiDbQ%pz_cl_7p|)P4AcTvM&(XLi973j4VrDjo zvx9-5xs}r8B)2HU4b&MryM=3wA+DpS3Uv;>iG9L$`mmS-LWkxoJOUUil>`ZXzwG2+kANlv{eCIIvVPt0Z>2o7iMYC&K1V0S%WjfLVS3)bEO8d zyd||`Jo7IeWC%mla0G!VRTVa4L-^|iKI%H%mx~NNMPX%UBb1eJe}J-t^r># zKh2cP^gwuoD{!4JtBk zP^L9Q7&Mih+e`=Oz>mytL_A#}?oxB3I^*{Gm^vuq(0U{D%94WTJO-azW(99hE9G4f z&CM|#4MAE!ZL^V-6>)DN7V)%kYnsS)@Z4P$U7PVpJ&a}qzcGP`Wrk#!;QSQ~Dwl_y zAsC4pn=6-h@6$E{L`|cZ$XkyxJXu;8?krV{*2=RQIrElU_>FN*zoMXc-kT~wM$*yB z6IbLstF2a_-4*R_G~)sl`e3l8vY0cFH+D#K#uW!axsQd5PF`V@UbU0xU#O6anrepC zco^O|cXi?bnW%pBv@tUD&xViJG)e($Mbu=LBeWT2r;C1lo<@q7kf-%)jpAVRAqCwU zm{;W~ubIt#m5w2qKCz-d7n=rgN*PU8BJ= z146wkorad628VW^YKS~7a`ZXc0i`M8naM1d`E#8ql}W+2P~~Rsm_P|tCUP>ZxoA}| zuJByWDl2NEU6vmlYe1q9O%iRVPt6$e3kn1AA*)Lq0faKrI*pQ$ZgYqK;qjo}Xyl;* z;{8D|Z^Q7$th^eP=3B1* z-HL!x7bI9utsczE%;4baaD@y_qIIR^iN3;XhBSs;*4hdbjl1K+A1vYanCi*I^c#NL zPESQg^k9@0?{c0T zuhhD}#pv|V)s%RY@WrX9$xw|kBv64~`J`eWMao55ToM67DUKq9vf7Rn8V(8hgBm$? zH3)NKO{hZ2>3F!t=MYM65k>aYBLObLqfl5Sv&FIOo$QZt46vUawUC)^Wq7g4ff_b; z>E=jtMG2RXz%N7bSrekfI8cL3X2wuM+dS7$r-svM`~oOy9f%Vp*Z-n*+MuCVZNCdp z%u?c$lB${cR2S+KBiF)XHU4PzIZP)c)b-69iU>)SxV*c!`EW1UokTjDZBUXDkflKyS+r)I?bHKoGuNcC%o!Nr{Pk}=!O94w7cRuqAaX@-@ObxnZMKIpySGsplLi+7f_IoGRmFK)6;K=8IVK4 z5nJd|I+TH2)}@~IqxOy;g+px`PW5qaT}21fPShFAbT$CnaCjz|HHRjshtvYQ2?>u- zFeCjjlMT&=Nh;@9?q?j``4hFFw^51`YE0YV0+@=jP3Ecv?HQ#DuU(E$6FTr_3kXLf zw#A|8hl5iM0y=;UWm%Oux2iurpT>l*ps&1wRbkn&G|bXhBG^bF48ZUW zYP!CRat%FZZBB0w;I=S!lqPja;~Zl?5}*Zh$=u~`mt+#BK)I2bo{=jR-Pu5w)!Rx` zs3b2sw10%v{$)0H4wfj%Y?-8L56x>ux17WB9Dy%qP-E>CclpxX#L4X;oYh^RS;u;2YVrb-J z-8N0c8b;2`8p$4Nf;eTyQ1@~&^rAj#$#ntPrsAoXutht^ozZ?#4tO*aA`)h->Lt%! 
zgbs|zXAt|7B}y(8M-C+SM|)Bh+WoG9*dnux&4$Kmbr9rSsuK~1SilzB_)-l+*BR=l zXHwyjom+c71mK~e7C$1m$g`BwErym#kbTf12>RJDbSr1F9r~-!?kkF&VPRUtuXciY zB@HZ%Gt6>nAs83+;d%E?GAdCxuXey|PfTD9SzS-bl7Zdkrt7~Z41*z2Y^+$)K@Cve z)rE>xefCG-3n$w3Ix-C{44ihvf{NJiyy4LDghu}#?if}A{Mj9ri_)XQV>u4XsVfAm<&JXFwYa0o=(@a4TwJP$R`vwdx?3}%c85f&8c9~!QtrG_`oXoP;&0)_@*mZDu67;uH* zJ27b}g(cSav+Pp)Po;V>K+*@QFrXMfQo}f{oe7O0i!p9@Ryv$kb{3{aIua43gVs4J zJ?%^|#B{pLkrS)p;NW=a6rLK?!(ViYfJ*gczs1;k9Yy@nNWjUrK`LP*PGyUkLWV4Cztnbw>QaGdncYU_PfO|YAq;gb=AVUGQ*gzWDeuO&D;5{6i*kkj~a}zYE`+yPfysg{VX1R z5Shw6aPeYBFTgZd^Hj6c1Qdgt8%IBc#2d~k0MWX0t1$&z#JmW*CRiX&lc*86fc4wj zKu81k2Kuw(3r$#@KgY5xVVWotq#-I4m2TB2bI8KJ-NPeVK{5hIZg#>Y^N1QMLVUS1 z>I%Ww%CS4J&bZS^%Fn@1_$uc^V7*kA3`S2*YKQ8fpjXg3Bg%pimwr3KL}y?GT5ai9 zf5iZwhsJdErNh?72kh4e*PvP8t`%X<87>;56B=<=gA9#dt0V%YTn7equDl3jBT@^Q zH3-Ou=nxntRML75+)sMVO$#FJx=>q>Ot?eXsSP^FlA|?iHwd^i^hq2{K%d)oX~+WM zW0$Ek^6{N$s7d|V3)nx*&Am>Y7*|Gu3pNZB@L-ls!z|+ujCFPVuO-4H$3mUMjx@H?a zb&zgww}Nro@=RZzRmQNk(F_u^UpPp&w%DN4vDG7y)9B+?$f`hx;-Z)KO_?~&2-R`B z(P7f1pwNt3`21Ksy79P+@Z+anBJX+-1}N-yE)9yNS(Kneh!6&0K&LxSq2g$9iorzx zsy|>I-n8_NDH5TW{;~*d^o0Kx`Oq|FdQ{B?`~^JxtOJnKjp z1X3$SP4!_uTL{4%bq|`x2nR7TwW)g@3u&naaM2TkB}W`pgHa_iRMp^SuN_lNmCB(c z6Z1GLSB83Qig6|O2)$%_GoaOg9l zJ9_AR<3*I9o8yB=>Dgr<47HElg-b5ZLGfsFpdF2N{Sx%)55EKA6^*@d8E&N2i+OO6 zjP{e0v;`_s!qWy(iFK2wc|ospt=s_CIE>c$l`*k(sD3W}2|dT!*{Plmy`K;DSs-%R zVz#Ccc`z5BS0v??eCp;=XG!?I@$-R1c+lkrfZ^quV#J;p(}3EX4UGYp%Q<&YMU!E5 zbL>q6Zb!Hlt!SyG(++L!Q#G_sFKtU)qlffJQRx=<8Y$Ij6h+%j_rtBx`B79RwIxcN zb+4S@q;WmGlca;9{!GlJKIqsD7-cR+-tf9R<(SXu~>;UB#E(tmIXZcp9t5<9$Irv zsKZIgp^CN9CEq5A74sblV#qX4tf!CcW{CJEOVJ$qu8s{5(<1e4@=nwQ(HTVNq&2?= zI-MBFi`quU5e8UQuj2BL3BDOFbao73z!dr+aR$aFS9kC2vI6LYsNXDT>Pj7of4+jC zgxo7m79gIayXZcmT7cRB;-U7Y9;aG8BWu#XI1+Kt`z14 z_eu#7qkL8Q!rmyv`sm1Z&WI#mh*l3BZI% z7Z2*|=iI|*1Ep(~Y)nU5ajf_J3K-vb=&ZbfH0Z3;1CN3PmR9wX=y1tcd~aEEbkWO| zW=Tu)p~T5ogEhz9`O1i7u*Dp*1#Cb!V4d2K?!))WBkxm7_Kn?VA>`)neVmAo^xZh* zdCj(2xY$UEE=CddbiFHhnwA=;b>}P^SLZTb@DM_4!78jz?em2J>4==k&A8`X%8=62 
zF21T$^OpHgJROU(Z`47Ia|)M>>cW~N4Z_kkLkBipal_Db;OV*HGEQcIcIBIEA2zv| z))`ZEuTSH{p0S;7w6O{EMi81aqaF1KHCf+$8EzDNhhy5<^!G&31o=if=%Vv-ibeHq ztwTB#j~UOAdrYIYjEGZOuaNqb*K=z3xo}Av1zpS^YMu4^;P90+1WY)O9PwVYhTi-| zgVLIAm?8Bwia-SFY#He0@T2OLdU z-_6gJ6{aOnNhc)h$iosN%z{phz}1V*7l;{+=nO{oY5;nRoXza49>Q2kSNd{);AL*| ztb}XiZcA5=fC-|gg1W{empAtjWnIX|=6>!E;HLqW#9ehO!@ClJJUUAj zu)2l>_cDUC?q?+ltzv(xI|r#(P-KlC5Yb|FO{D2?#uA)bqP~Q*uBt+SVY|_<&cok} zED3s}_M-d*tsp0DsA~1kGo_tjF<5uRP#P=>6B8)XSEQAZZS2_T1V=Mr53A%T-NK}%@FU1_{-|ljPZt?I!=fz zlWDKU@&4hn-2>&a{bc)DZmWQ`Y&F$zZWx2wR*0kp7~fXHz4TJ(uOIKzdDV^N(9|W% zNDq&U=@#AYt~A^->%2|544Fs3OaurD-u&7U`kOd{v$TkRBZhkC3RAetcS63TQ`HC3-sdqt}|5AwN4`&gI|Xc{Hd2Z%4K!9b)m>|STvk;D>k6wLG&4j5zwW^mI& zMVIm8JHQz_F;>dhwE+;&PDTS%7#;LSPifBpvd*1?-0IS`FFrzCktYmC5|M9iKz0UW z_|+j~c)72TQQ<noIJVid@H#kIyR3Fk~2#{ap?HpG) zE(OAtN>RI}dkG)8M!A zMQN)+Mb|nW9K?8a51kn}=2P%*3{h;B+3?yOA#`{wJh=-~{-aUo7g9|_;2Lc=AZj%Waz@pP3*IlOhoGJ zPz+9i6jQpb?sgB9D;@b=i|VsEY#h}tp-9%^zPw)czg8_-*!G{?jq6mn81pbsjFN}3M3f6;vY&b;c{xc zHwPCClgH*8E6r4l4klLxE@g5N z8EU7=6c2IgotzkAI;h&(<*!l(H5oGq#a%pD;-W+E-C;9TMYN<70=+ryM02$p#cT{Q zqIieNy6JiIhr+{=Gy>E*UmU+2ViC>uVt&iX#!WhSm`x4V3=;yygdL!?-`qbWa;R)r zPYGN_5(1l%QYdI9;=t==05gMK&Fj}=Qgbmwq*TV;ZfXGa7R;p`avH!Q*K*X<=$EE* zL%VB97ZH|D1sNJh+fG^4S!p$#fZ!5a@z6GNeqdBs(33F`dSgj>j5b$s4JE2oJBIYd z?P``fMU#?6U%8W!v<)>Kgb#b+BadMgzI*O9;R8}1>}I%p*}`%6orPU}0j&F5Gdb}I zuu5#$ppWF65Cm26gcE!QhoLqnLGA}2IEQff1j;sG3_tc-LqKlV_vw&aE9gW^fIk%!~Kyx#)QC^cDPlVF_JW>HuYD!Lg!d#D+NjI-Ne9B_n1mhQayqs)tXz? z9mL*H$B7B%XUvGYyRuju1D}r$R*C18;$no0$J)IKuIT6qX~fd_!6!25NWW|(0g5+j z4nY|NDn|+TkrbvrbrcsE?D3hIgJPaID^pwq~& z)G)4yjFF7lt9XEe>T++FEvgArA7}!fxRTbjw8adcKy>I#u3A%7WUOk4D`wPy5)ptRgLR1bHB0dnAvn~wBK4vUBpg(%nC~TeFXS377h$X z=fxkGO-EJIjJX^2W4Nj9m#u+=^3pYAu>jQ)orn*4{e~!sGdAA>9n!2 zg9R&9G`GlFEb`kI_4wToN#uhfFrKi>#b|}sHgX#~cOk?;aQ2wsg0!w>kcBenaqmoy zc$GV|?WGv%B~>FlVlq>4VNwYll0Q_#`G}SQ&+$baH>go53M9i;C3wsb8cEH&?DUvn z8G#5%7!+(K>0t?*lRLASF?WoMMGTW*9b>SCbX%s{k^dJ%x8zuG;` z=z>*Br_y>Y36?JddIFNs5#tYi_uz`?)99eiknI*XFI;_e8YD4Q-%8y{N*~&#cnxA! 
z2zXIvbrD_61)i{bWlsG4=0?blvXJIb*?Otc5EnTVr(GSiEMK5>HH2tD1$OBJp%;hj48n)Fm8N-CzLp;pM4~@-qk6!VMI!yxhPimCg>4xxEcgWvTZEC0sh0Y z2n#xcCHF&gSw}_-(3MYGb6kLPWnB=}(+DJng4kV{KWbG6syLk|grynMtuUmHR0+zY zMR24kr%=62d}P>q1Xl~^(E9pze$_(nG}=&rL}Eaw%Gkm&@q6eLgMsPjk@`{9FmPn_ zVRll6J=bG)*I{&F-X0cU88%gCQbYT%ejA$RRxL4VR7*!qIMB{&Ev^TvqYA-Qz<#@~_@3ikfzY6&LtNxQ7CZ zd#I3zEm1CkR~Fa%e2gq!(l6Mf9nrGu@@AS41&GNF=F23NN_SmCE1XW+9ep$~#0*xT z5QZXrspeJ={hy(t-wUh1R1GB<8ECy5DT}L7>u@S=1LIFXB`plM8EzA&SQp%;ey8g$ zs~H`;t}m}UlT+U)%;Bshec1x~ZimKE;kIy#D$-HCQ8X>*UFl2D1y6Br=m+#^$dZzz z{0G?8rW6aOQey=2Z-lr?2cs`GsC$gB5=ZOGua6d+-|C$*Gq{0NS?5zMU2+lG0ldYMF*+)C zbv~!tU1(>`u@eS8WWlxaQ8}xOfZAdJ{t9I=UnwOLta;4k&f@r5C zC_{m-9ade?s4}94-aHk+VMs@mW@c|?rX-b|EY%y}A-r`2L~RTi3i^^#nwz7>IJp26 zMh*+t9j6O`{c2?Mv>7{#DT+3YN-D9!wq8`X)rjZ!F zqmdhbPwG=N9($9U1y;!uu!>*K zd_mcnTtpLfW%!i_In@~)bupz8zson&o%^kD4QX@AZJ30k?n0G;xSdNz$<#L}T7@vw z0s;ot=+#aJ0~IK(t5$>U$s0FZmXq_t^x!i7#NuW&z|spa6ZRhALBn8mNv{f26>X}3 zouz6Kt_jt-hEHKpohIC?VawP#re`d&HB_X-5jLc8&|~JBTucFmmi}zj)iA;jZ2?$< zn-XkNfGjge<7#LX4JIn}(a9s&av91QhMEjbhH|u7zEc%0xH40qm`y8V>>}g*_(??s zMp0wp_=NWn1D?&Y!fgvS`UH-KvjNCoRjYA0P=S zLX=3RD5L~^ea|t52f#f2+z;JGGlcI555KI-To?CFU)^=wVTk&4=4)PKWJ%1uP;mj}^`daX9n*N`51=NDsC36C&% z29;l}0fv8+Xi9mIs6=}9*Be)n0g>0i(An(A&hubnqZYG;xlmbKVZijhqjvpwp4FBu zg-Pwy-CHSr%Clo~B42;oh?RyW5!6q&@g()yT)8&(@(hfW!nm@4S=!h5VF)&^DqWa0 z**I%ldC#<3SH!$^7O zU*6ok1s4#=#KsXqZW~?N594QWj%GydI;wRtAnZ9Z!D3%YH9vf@xupwWqmq+~L8js^ zX*SZ}yHUIe;VUiZ>ZQv*PdEicYGJ?=;n9_%xYqdWiyl3i1snXu3o$XbwG{!%K z013f}P=-4Qq?2hBth9QM!dE6!g^l&dwIKvuO)-$2g1l3aDrE>+Brj!Vyc_b#)N#S* zjs0=90mB(8=>Zptv#av@T6xX8OeD%dgo*YMk(!NbMVc~JjkPxra4cw&%?G5BPjzTl zz|MOGDJC&b%$o3{g;xCBNztBy>v3IZwoQ)njb+sXnt2i5XOyBC?Ob1fIj$`uAl=O8 zwZ71aYT-o}1rT>_oHC=&>P}v~%U7s2KcuQH42ycG)*jdkfNX1DoiuO1_q1WUbSX_)~K$yZ0!ClYm`rR*WoR`J$AjaCWAte7H>p70l^gV__-2A;U~M}!u{90kcl)X!XU3wE_e^= zqsJDTX%oa1PUfgKK3b$FIIX{jQ;O70di2URx#sBxo!^kt9U4@~IT&j?5V1R-=8W}j zqb$I1kHJbYc=T9iV5gV6bbac|;U%_XXPz~nN{&td5?h+b$4qmC<`Kn;%!sXG 
zTdp)GyAjch5^T-J1ypk1?6f6Kzcjc1So(n#kVFfSw5thW@FS2tCY2*H56K&kelm7+ zDv;`K$e&n8w=a+!CUKaU&?KQxKK_@gfGY*qQfX?rsqAjt4ORxx&a-&j_*2RGwinZ> zMo=P0XGv$ag8i!u*eKmXT_bfPg=F-S$8Pfz7u;h3aVUfRSV;C_x}fL0Kxh_l*=>SO zM?&Yk0Gtv>j_$>EZ@XQa?)Xe!ab#OrMH+EWScXZ#sOUkEO0{T3^u@S$>)pO5mmg=r zMBrvu$IyLmTf05X+3@Y4s$Yx5s*fb)_j%{b^zWF#8sTW2cU7A= zuE&nfp7}uC#~aN)C7w(fe|^J&54gB&pCv{d9l~~^BnvpI%||4zuS!-f0AAqQq`n{E zPI@Wt5sI&$AYM1f55A{PeBWwa?0!A~eYYM!DKi4*!_W z(;khwxiarHD_%mF>jXx8{PQO~)f4|$h>FW5! z{Vz}F-^|A%XEaP>&{iDsQPSO;hhN{(Z@Sd34p!aE_gkIlhaSAz|&38Mw|N`y;P}ZU}GGV-;ry6+DJi@JrVI5x0$|Q#0u6YKn)!r&51^ zm^lu1zHI_H7u?D+rvg=9YytXy8KreF%jeAx5L{<#9y!}`)l#PBG{-#AyTFS^O{MAV zdooY)VWbqLW&_u0zRrjBYL*zDKUjCIR-bg(BB?Y%5gW|V+l&#TbG2wxy)TWrP3LcJ z-xB~P2U36$^jGNJA*D{`!Zo!Tl<3C6TULF|5b;2#KSfJ=>`#+QHEEQ?e|E=r4kc4= z&CSk8Ms85)eB9AqT723%_-*>x5)S7n75i6hB2Xc$)zfftI>T=BNs~Ga0N7#qd^FvY zwMs(E#qbYqMw&(IEyv${o7NY?%2APoLN7ZndomN}yFFQnv8+su)aOgYq{nwKV*T`r zw`P@=8foIR_BC2CQ)1gKVQC~CIRSGfh4d#g_|R0yA~8r)e>9|7rCnmaGxP#Vn-h$+ zW`+u3EV#2`h1+8$R*{UdxwD$0LAX0f-dVV;rO$Ov)(j0rWIF3~B;u7a7)j;Y2%|AK z_c&@WLTgK9IP6Y~*^N;7wX0Gf44d2%ETLN#7_NC$8q$4Oa@{3-Pq}QVPT} z=NDHMN-@bY?O`9mN6aq=S_BEYRUXR@6~q_E)IuI?bqIoyY3m|3b$-4cb9%dZeBEx- z#3*A>j1zt&M=hBX2$0qI0-HICls^Y88rVB~3C||uJ%t9f)AU*;F2Nep7`)#LQa(pw zZ6-vbzfPle*ZqPhsyuD?!MN0v=792zJ>3{0aJszqWV&(KK>#qi4}4ixRec9M-9@>r9!ymHi>?=o+U6t=1$4aAN}ki{A}sJT<}D2-werIJADFSSjj zH6K$k&B5+nYP%d5IQ=OxZSBG&yh=M}7%@ux%!+_}AYX|ObLcX+U18cKft>l9!xp|q z4PtxVj`Ymyd9%7#sdjRN?O^zhvIw6&r5oY4uElO|yjZ)J>&l46VJ6XQ=Vr@j03j{U zW0zW|TF-fRNMrG8(%5J$$ygIxHJI;|AzE}CQW0tS70RyBlMB=#=7tSCrq;eGbrwOB z5N7L5iz~A^H^A=1xh9%8UUbSqMXNg7ageoc)S9sgIvaJihHIffsCYrc#ub_)c~jVQ zW4aY<=rpA@%QUEhf5|@?jd!q_hPt+G5W$8c(loH0!#sYA9w)3}i9_|M;Ra$rmWKPh zyHSLZa=Vo}Y~)b(U3vj{H;y~fJ_<(Lk*$OEh7w${hKC1NYUVV3x2qlw)n<`KXHXET z+f%-r_gC_PI&6N$BXp5F(#*m!<3-8Z zIOZ+wH@viDD@OV-^enE{s&73|X{x~h?3N^q`st~ksDx1;gha-qyjfyZ&|r)Qb6?W}6<->-* zK~Vwx=mlWq^2ohgdJKg-cL|1l8B13I{+xj{1GFM69jV*P*{9Kxoro`xJNr!egd*MQ zuO}5dje#-?#s>D!8yMP~Zp>wYG6(YbP9tSN%#+y|Ew&Q#ZR9MdG>(kR04GUDByFAu 
zdl&KXjl08gXACe{90b(8`3rAt5EvT;hrM%kM}-8P4BrsovnHbvawtHO8<~0(62ksS z*!V@$4W$TIp?SFtwoWBoeR~a$BJTl&Cir33^tqpej1LKk1TRBsf-@m$?c}<~O%p9N z@-s8n{hb1sCu4v>^S164KsUkhd74=sP7%SptCz*iiC3-^ftt`g8oU9~BryYFd1Jbi zLt6jjRB{Sny3Y0ELxSPBrZ+>hS%p-pY(Xoj{VNCDO^XJcCz`{3=weA!eSk$aN3)x1YdI1-DJ{v?nkb7`=H6 zKn_3*<@0D}^UAEAMFa%cr!?!uFSkG3zFlXbaGv7In{ywrDyvDuy~SxRmm_kaSC-o5 zF6twjCW=th;ovN|@p1xUh(4d88J?#lSD{B$%0dY#iRt%QDEtDD+Sr}$F(&cbbtex&{nSdXmFwvS+lv5mBJuq6^ucl#-EFl z88=K=>p`a)OaEmcVQzQO2m|@lN0F_Bdm9q*?fu^q;1Vsc&Q*XViid zIO?}`ZM!fGNRA_g;b*^>S3vw2-GD5i#X~o--TOYyV8a>X=)SjV2B`T{*jWMmV2201 zUr5_3Be3!*nt0N-=s}o1N!AcnVU)OO%d%I3RDo*M*R3{iOwRlb7a;MYM3O*}EF}Gpd?gQr<5_!OZBJLO&wb+2KBtPwqPrr1v#)=DV zLFK_a9y`{qJ-Neg4u#g}6y4h6_@-4S$UslCiFs7$+J?(jr~m-dZV7pLi_e{W^uip} z^*sa-?2{LU<~X}hW8Vjk*dDuiK4E4`-|zC9Q@hZgnim13_`fzDp{z%_?NkXnGa9!D z^m$luNxO<~liYm|bOJd*h*Ybm(7SM{!A&8%JD51zu|*|p3xG@oIc~W~C!*GgEC#7b za>tWW0p96c;$S=VjU1j3V6{O?X%M&IUo4wwCw zOv4%vljA7#lLi`I>1X~9k|beMZJel@Xm9q(UWoYnsrW$_D z(=iW7Re9E>Twv4n5hVC#w-g+~2&YmD#wsdHR6{f)HmrsEt&6`Zo3%dcRFrZ~%kk7* z2_ihXX7izg985e|Y=N9zkpii$(yh-x&ou)up>QrxpWv))@N_zCo2x))x~46H)bF}+ zHWCx=k%KGpUb^)+0}8IwuJmCZzywX^#a-0;TOvn;ggHaer;EgkIBu$P>nB7c{$q?Nqj?Ac9pHRBWop%3rpyrITvP`8CH=e49qDogI;GxZk*D{Y`U#EmyToE@cKI zu}VU{{;q{zS$D2!(@VO}rQEQsVdGKV&x#hANo^!g5@*G-(G5Gg!PZYU`LBHu;oAuo z2a7F*ADn%;2z8eqK>@w08FY86$V`gTBILjiGcjl&3gr&BZTsa=NM=Us+84JahX2wW zl^YNh>Ns37X+6P=5Q&pyo5Clwu6*3vWRe}JiXgyBnzp`LX6C39fDVG7t?q9 z=N8d>z?HhUiS>rb37X%rX*h}6Z@0XGpHL2VdBGA(GPd4-guCC`dIPtxFHp9emNrs%QAzJ%3q6P1tTQG&)AN6U|7$xcUdx0))t0vrJ2+vL>L?` zC+db9vtzLy6VoENL`x0=KWYVrEziK z{+9e7#b}29{7GX*cuqYhyd~`vh&KYS=zr8r#>bhX{iA7Vv~@=6;x0M!GpKYoYhVnV zLi@*pG@s~;E|)kMxHw}x0b#C6&wW%(`0^|B_%bxp(%=NyOMkHs(DV^zd*;!;jRU=&Ms@UHeF>0#$fJ{0=R<5FCX6A{(R4G(EraJ zE%gRqhC0V2_o`y7N7m-f2E25+Tw`1=#~c?qNY+B)nh5tG|A z$&BL!Wjqt?iU{v?1ale{g)uzL{s{FfB4y5gy%eTTMvDD(@J_5bilsLsZQzaO7Y%fV z3gRi|j?bs{qq`P?q$gbHK2Tt1gbX*ZU0J(ydtO131%esBQRlay%-;=drsst~_Z`8J zKyPd_7XYN(j81JIz=4?;;GDTlhP}g1Tz~$Rh0pHa?!FC63w9UH3Sk}0HP2`~W|_Lx 
zdv<$vw>D7Tfyt&`37z<~s&-45Ll2b4^JUJw!wLEd_*lM#S>Z+vb}0-PxnE7uQHN}~ zer0r*GKD7*Cp`OlF5$taW+(YHsI4xeI-4b^+%e9}Rl6)R>Lp`>)|J=5Y1G2mpV{~- zt!#@SG?^?P*$oDbg%jqUhFiME=a_KDHnsGgCzdIvouoUA1?&5=TYGJ z%iG^p`_=dFOh=-tm$ZeV-b#6^QR9vxBJlKtNY8~SZBF!fp=uLNl+x(T^RBoApnfbC zH&z2o0Zi8T_C%)u8VccmXzM9h1vqNhFzq6EiR z85A85!SR)?Cvt*QPN@Exn&{O~PktqHp&xnmyN0N+K*dB8hi}__eYbyc0Xqvk4mUG}Mm#<<7@IqUVq?H=Za$KN=0p%a%vDf|#r*_;hZNmpO$DaFtkI3= z5r(Oq$D=z#Ezh347ALgyjAxxcN@k;=-HhG22twdu1w<))?E!an5^H15E_?36T}KTI zSo70&A+K<4?xq3wl!0sn?(~g^Lat9bvNHi+@8@*GOe0rHl49!vSa%YhHpxw9N;3?) z!rN5F1SV545zud7VaIputSmxfAU8iNjWBBHr}>p8piQ(^^wEhF0&pvtFjnCnzIhlM!t01!~(EeicZQ1stq6zF4g=WD1&~#c{jd z;+Ob_1*DVA$M#faJ8;qbGRCOh+-EPbw>WV7={SAgK_Dh}OJ{1~I!A7ypu5Q8^xM*O zvKMoiBd$Wp0bq?dyigz1^Bew?@`}8sGE`nQqr=~Bmyx`^eV*>zT16g1@6(F*%DE8Q zgd1?BzSG>F_^7YnzCUc7$iTSfb4)(B0B@nofi7?DO$y8vwTsY2sZqR|&!;p#k$XZ> zKjs!J*zjta2;6LjuEX-^nV3&>wjI|#pp=?pg9x|TT>{yL?Z4-p%WOW^>sqh<*u0wl ztVyswB9;e&d$IZQ67r&QpgKcgIV__QN@K-MTL)}Lduz z^BUQ1^0GGm3@-;!T>CY!L5u6mT;GK=PN5lz8+`g)o=Knsg@So4Fz5`ilQ_0U5?z>J zC86o7HE;u3lt^M5$L!O3gSPhcS+KT3b~2Z^8_+A4J-pxrUQ;S{*xL-{=WE**D_n)I zm=P_?&3zIZmG&;F-^}se(+DU3>$n}pM?%|+4)FB#$yGxW&sdeRKIy2`se_Q7^!$L$ z2rA|ytZR?km7K{3AvX_R@;8vZx#fRAJL=(kZMy3w?0${um%Sp2|L+N{;>a*FGpmoK zioALQvlGL6W3u6Gt$1Ws8O8C_`v5hpOfIcZl9wvD9zv`&)=Y$PXrc_OE>X9#eoysYslSv;<5 z{CC6PR1TRP!<7ROJ&5;sk?qi|`fOXSzF$Kd0i=)zvKOwfY#2X=XR1>Ga51D5#(xBi zRlB?1=Cu-LV*yE`hIW@9%+;=<7#0|My7k${AYF)AcE#7`ns>>21CUIh80=fMi(V&9 z&*wM$&Iji966a2*obQ*Kmkx`_aaQMTS~-p`-REh>2QNLtv-rIeAo$g{_m9h?Za<9B zJgYwE{4VWgBPnvpMIz8EF2#NQKB^D_FhpZH2dMEcth1Jdv@ra+AORIehu z(P??|#?Xr=?6%&MGo{`k_(?*{HrUbdi_z(~ox(VXmT*kqgA=slz+xvwdWH;!w?^Djl^=OU61GV{$&6xH11F(@qAU$u4vML(2 zNwCcQ^XS`1F2v4HAZO81pdj5IA*z^ql!B+xtRN(@>o9INqTUQKebnBS0Y0+s`3Gf` zrRi-$08!~Uztb33dew}dWMv?3Ee#0HIuxjfvFAXofPv0BZY0Bnb-Im0g7cpTK{U!F zWM<@ar9E6R>uE=Zdr1J3Jwt4S8TBdtcKiK1k`V8i9IL#RVXV$_6M77`qc~s}0geJ2 zAM;$FRZsXclC?VV4;UBW0}v*nrzRzpKY`|6%NeL>T_CsK7tmkrmmT<`!-=}d>rZOv z$^e;ljJd;)83qKTdq!R-9OH;rURfMclOoK_wfr)Rp%|AtnvaOLn*p*lGh4R;-QC`_ 
z?@_JE^u&WvxDlKiJUbVRrG_0K&tkG;%EjIIF`Y8A?k7jy@oRSawygMX@BaS4*Ndi= z%i3UyQFP+TMubNkQIHlKxE-jA%4OJa>Ogg?WSOO{Ae3@jFD<0cf+!!gkHLa|BV=U;(NDKSSat?*IY#DspVzza@WKmr&TDl$bFHNdI))=N?tQO= zenVXa%5|Xo@n&$}j0U?lX0X$OArpE*E1NT^VS+gI^GTOd|WkZ z$RyjkGB}k`Fse4A&vbo2DjOtw-)fg+DI1=23B@ZG%~|{?U)@aaID$y)P#`%dVUWn!Wn3@>yA0}%6N6&N0Ey&8N;dtPf!VUBH9k``WX|RISJ`wdL+mg; zqFn&ACwzV);1W|&rdxW3q(|oeVjXja(yXE7WY{od;8B@A0o*!HAaG<5GSgWD_6*i* z#B&QJ_a-1$vWQjZ5G6exeZG{QjPjkzk@NJ1Ei<~L2!ll~@yYsBFlncz>Bw0glqq{f z5W`z=S~d{EQR$E@o}N6Sc9i^YytYErX?p{v4^~4W6&Kq8(RikBo!4UIu^fmBRG5gC zkg1=5FBaE~sW!2%R_eX;)x8((s-`V4^sA4tPEqg1B;P6;s1^;g+2SE|zl>_vQMyg@ zbi9S{N%xwU*^T)HDb#;`xcy;_l?EE8oFwg3e);g7-ci*+#)191NpJW<+GK9WS2&9= zwBiXqpRkB{$VVkA1O13E5}-Ts8H~jg&2~helo9Y{sfcO$>!)%}WvxhH;-hs*88h z&VMuf_4eEQU)=d}#cSsI4JM(5YP=Sq+0(z>{F6QA$K{mAHF=~zx+!P_(Y&M!p<+TP}1MQKuwGk)%>L65} z8wSQabAN@BRQXX7RNyk7B#sz9GHSB0!cxvd!@H7_HEsRyg6^7H$+7y- zdd~LU?x$wGtaxxIrv=N#F+tPU9D>Q*OyI@9>L21QK5cYw{b;e+<5in4J3tEL7K>u7J9IR-2 z#e<{Kl<9*3oh@lLY56jTjpfdzHm=+4s|*54owmEHnc-G!EyxmrPdSTVL}$}b6|Vny zMab7b*~J^ad@MOwbQY!CdQ;TtjXq!?5>d7w zkcn2;xgbpsNt$v^a1+PBERJ1DBJ-nspxx!=1`t0JC>LCx`q3dUMzYE4GLS60!mu3A`E#gD@^yyZ_Lnu|Fd-> zz*mCm@|x08dhT2*Q21g1!1MY0va66Y^M3?&Zb_f1IZ)sJ#Kf)q6flF4PF=1?e+*0x ziqm!Gn4v0-zNJuXSa0Q;GlF#nQTLufX4d{0|2~NC2&pCuy-#_WdpM46-_`=V&xmq* zyf)T+s>ui$zRBd7xqWRhz0kN+DpUv*?a@_+3q~9@2U72cTzB&9kC4QZuMBs_7hIVj zJY5+Aaq3oP1kkoimp=JZ#)+*;$#x$WpLg)1MHz527x?q?taeQ!WG;jo5LYm@n>Z}G zPIe2F3lAi*u9+tW1BDZ=KpG<#v(R$Vv1q=)wIDV`w%FH#w_s z#<=1$noaKbImWY#l_aHR8W}=C%JFU9dl{n*`)Obo3aLi~wFQT#wi_{Xu>jsz6sNf9 zl8%#ORU7jtl^(tt=a%~F6_Q2f50gtZM)zOGfxB7~@odT0|92jdmq(0I0Q72|q=@#( z7iaP0=0>Jj=Mgd+btY6VzulRJIY7hRI$;?ow%rg(QNZnCTw&M=!Jiu?et+rZ5%Y;k zXei4sZWd&;Dz#_+hKHmImRd{zx|8ZNf*KSDO1HI~3N8=YWb2?LT~f+Sdk!am8LMjn zM;tlC40U&O$Hy1u5=KYa2dSnyMkucI-TP=GRnkUxhT=@@;OgG|=^_-7QQ)g{QJIZ{^$_dl$X;Ga=oMK2yM1|xtusHzEtfjU> zFpB^Q1u#P9S}NeexvOu<6P?GaLJA~P*HdYubjc)pT`oR!Ls6(hJKLotN3Fwkgk((5 zqh}C>$nn7*B3}1+%W(rtRI}j{dv+VbOyBnapSF+33W>@-j#xNt^?B*J)bCPL%emsD 
z*@BWk8@|$tNWrV*=uq@>Xd2KO>SB;8heoYp z)~TJ!!&w(;H$6x~dtl?m8WY`Aj1~|bB_+8`To4dV_u$1_JN4khG)qv z;t5V?kc=;;B@TgW3U#zUHk26(lY;?bM1FSr^BzU=P>u+>8S;L3T`tzbrA97j1moSE ztrcu#l)#V9c`92!w*$_j_|Sj0LJqFaZrob--Z;w|H8Szza1clXBj)1@BU;CO0NmFjLo%dGgM|aRARc9Y4KgvR!C!@M#iHPWl0(EKE$TP1SR7i1>@Lc9_VUGI(N9 z<1;u0V6MWNZq8lyoODh?!%XA_F~wP49A`=hxK6>N1Mj5K`L620$aBRAUc+*J*fS`M z26jt~BN*eH?dY7^yC3|rTs)&2CFV)T8YO`ZufwHr@qcdAbA3J(5ydF6V{NI3m!QK+LYn1P}&U z=(VZy*PTBxd|V9lGKSVohjz9AG7@kXC2br7+vw-L!1Y4iUjo84@5^0=n$?;d`h^ob zFjh0m+sE#pF@U3(LC@1>p77JfFk8gGd_xAGb#P;g` z*LRM?k&?lachT3ll{r!vri~B7sU%8F!Ujpa z1p&&E`6bdGzLZ0Yc12zz_yp zUhDD0{Y%t>eXVurFT`FcC<+kQM5pF9D#XIm4??YPY06q9GN+S5dU{h~=eEd68=}^o zKg$BHWcjcw0?Ejr4JH`inGq?JE+*TM!raqQ#ty%fD3&4Z7{SwuTvRh6k{ZJ^wky+c zIj#@Fp{S>p$ZfD_p+1e%*_Jx6o@4n2tr(@#j*-D(HMJoxIJ14UuWhS@J-j z=8PsK5GNhfI)w;3V;*Mk=wHd{;B=cOaaprTcSyj)_8ZZNeu>k%l$Y%WVd=$~QniW) z%8sF|zTKB}E{oj4BM>8aeyQ{&lAK>R)b0r^95IVVK{?hm2^E))y1Evj6#DDXsB>*VNEyF>9jNXIxo{Ob6%XYvtLJ#1;z`J8~H69~7-r;3xiqGFbfTZUOeMhGi)MgjUvDMTtKMaxs@ z*6}{9G&oMM&`Er8ygZ25fFiNz-xF;F%QLg0Dk2UCFE$Q`kurC)dxWAxs@YoJ_Z5Sv zTu?tztW{qI=KW>H)IVQ8MGvDoN`w5%ONhFDO+GBcHDXugBZ`sP@!!Z0IkQl5nYIa) zcK{MV-6Uz;rvTn>1*Od;dS)9tvAGTj#xGc6dM|e5;jlXCkjtl_BDc%)UElPDJ ziavCTWyb*ABY}d^6ITK#fpECpMQ%NH#&q@0iE<2)~2nj_~4e?0QG42*i?vDe(>4Kr4;)-yqH8qk<3p@L;OC*{CqmQSPSa+EToF@0a z70?^B< zQwx`n)}8FE&P6--PmCjMLu5E7DQO|@wqEa72WuG(efksxt?bH{D+N3egP1gdV@R6! 
z6%_PIKFt$f{|&sPC9C<9y%+#*wE+t7qKOFwp5J0cS6Qt7&^@BqnBvguS*PP7%f+1a z`~r#%D74G7=ft%LxiWxVyF5$)i4Wbul?MOZJKvbku+M4DmT206{2&@K1u0xK`o}*B zdgA@Mx*w;MMQxB%2*NYig3A;GaXJ}+Csq#}MW_H2n4l0`{&)24Ovs5v04Oru?NA|9 ziNAJbo;USXofw0?w0&fA&;-0RpjO)|Rcy)CN2(t~YsxT3flq2y%3}2+6_&H4DFv>H z;W8x0701(;>qGe91m8~yK(ZIbyJgI^z&H7_eZ=5Togx1ZN}gV{*m>$BDbXRhf|VAk zLS&`uFPzz&>V|ZOY9y4ssZQU!!J(T+Gzf9UG^X98joA|2%~#*nkhZ?-vbIQILv8|= zGG>y@89sbhdx9Uk^9gI?*Ba0>vlg&*8z`LR5Y^Iw%U!R+aHRpg)O{_rA(d3Ij{mu6 zp_Q{NBXjXh38jQ&lS+h@8)~w=0jVJ9-WqdV3Fw>OeHmYA1ijl`p90$D@#T28V@Nib z@oaHz6g1&OtBoSO&=nd`{JRM`?g~>KvRK!+?Y9~L;kVP66z8NhrTEd0G7+XaJm8(# zS%H6R52vKocf{za3U(9WU*Aq~^z}QNF!W74tW**@rqskv2{EbnSRHTJ8z^U~o5Cl! zYd`ARby<6#519h$!`iTUde3JqTc9*wEACBy0JQ$RyRXk3t2-5|(BWJXgUHA|;jPzd z2cJW!Xwu8Js|Cs+jqLyF7g&6Y+YBu)>1l5R9Jj6?>6IXBNO+Wp7@Gt(eLQ`%m0@~$ znPe1+Anhx&fPT%M@xVtx*>_|5W#Z900|s7jWdEK9BMhwXC=2mt+mQ*QPh#ppN`4d! zqt^4ROQESTq!3l*7uN)0h@s^CSO)H-99m(uPCmdw{n?6-a-tjtm16%UFi;t9#Lp!* z1SxdZ9vx}E7PGr2W;a22AvrHBr-+jWiube3kK1B`10KXSz|mi;sR3;*^afA1&iI$z zVweh{MTvy>SWti#j-@m9f~_>UlyG6v8n|T-Bdb3MneWH^@@r)~cG$(xPQUJ~$FOh0w~;5MVX zIpo(rb#z(5?GLxVuBS7j^}n48%wxc(;`k$91W+D|*PVJWR3ak|Oak(T2V^>o^NiEg zBUb%#;L<+#?Y)z5v zq_nxMzs)Jp``OoVwRFjsQMgNY29`l2|@DE-uChP#~dv+ST&~71y;?^Zr9(oHh_Ua&3Wu zKh>gL|FoBYCl!i>DCQhA!k`NK=H0J%KaU!FBo6ARB|x+dQQ=&m?n+k*y_1SE zHS>fD$$Ihry~zjibCcjaSr|2atra>*Z;-nXjCdq@Wvx$X$M`9d;Da~ci&Z54LB-Bb z3Q$)v$-q6e@(C+?(3$ufyuLzI&L5rpn~vux-R4TJk%k0RZ(V6R?D zAL?9lYHW&=5CunW;t)35CfL}x0Zmquj{c$%O&mr@m$>SaHpOlM))Dm8uYT`UydHF0 z@5Z?>!Klm^&ZiITecJZXnC&!P-^SgztT~{dJZ2}(fM2;>N)f4Ws)1y*%b`acZlo+v z!^c(SN|o^$2Kvp~6F!$MLO=+cSu*6#f?p(;mX}HLyx2@5tV-#9ytpUl*m&0$5qH`< z81Mw5<^)FOE+iEKK*8zNkbt%|EFfqLoVt?>aF-?6v<%Cn8i#r%kkuNYs%AgA6(7Ti zPfl9?=|7H2&Ehi!Aw|GZOuc9h)O_JD9OQVrB`*hoxnehiTAe9*Z4LUW^Y$j0c(QE7 z(}?F5B7kt)v^S+Q)+W+8+Bz|LEzK`goXLvJNIm?Y7(~WlF1grn_x9b-@1A~xR>@yK z^|V$K-FjiKzU!=OASuH4`GW;+YnsTVU%gE6qJ0#}F9I9kQ882s4=_32)gys0Y>PNd zwx3lfW!t~tSxQUcHWZ*s>uG_H@2X$VApTL|C-Cghbr z(4RwdazUJ7?i3m@Wu=T^bz#za;26nNZV 
z2Qv6_@{KWQY_YjJt3(W*z{$mPRZJMhS()5YA zNF0Di0r#PRVem|P87K-!68vKQ{{3Flp=g(5{Zd-KfPX;s1DboWBY$9g@*AhBf3FFQ zdA&-J#Y|m#7G9Cddg`lP;N>2UmGl!PkjG9#soIvEu(LwbD-DIb9munwUTzKaBuDk6 z%yv_Y@3nPA1+Nb>lOYk}Xjyy_80xs1W!&(rUQXRIYBO!rgID6Xr6IE_W6vtQvlq7OyNLoV``?8QSq%yS14e{CL-~Ie8)2kw45%0FLE?Arb1yX5EUM zQnzBh8inP@1Y5BY+Ira0c{?=@S^9?1&QrqmN)074$Hak!&cIPYl6%Ft6FvNq=q?AN zkp#S2vC4QFKfihPWEvC5X7gpsZ8%O4ZO)hS*%%+TdUtOfH<02mv`C%+8_jBF390ym zw2CY?LJmTcROSBc&OG;IjMEf4E;Qr)0^;T&3T3s?r9qJD=&YdZi;a>gKDpT|Gmr^o z^IWjx2iV^GYA1dp;DBV8Sx$lb$jz0*ixJ~vJ7bfX0i5a_csxiL4HcmP!kLGDmv>hd zKFKGuyA0D>hb{%Devw_3wBMHfX`}Ah2!*EU$(c*Az~zDu~+RN0aeXdmra7zldm zc`|f^-H3Ke?r5JpWS;Oy%&GmIm*a_rF(EJ-Y(UThF!>OMX@Ha5It(&j1F4?$-oe1Qnb% z@H8TSJ6#L3q#a=9I@b19+}?_PwbFk}A$5wa%D?Hh^>T)DziJL(o)WHq`OkYNFDtlx zz6tTkgW~wk*C3(*YKF_@vw(x=%MHG2^O1;f^@|_5XmU8jc;P{r0^bbsPfAU$rLzQu z2b6ju7xUM@_fs1v#^xgHI5i~c)c?BB%ijcsl663TQiF9#!DE+^Lkl_(*R?X5^)+`5 zdv}X5u3m_dE?h`OTs}?wo8e#=mm~yANlf0G@_G&h2sbr;)nq=CZV4v>veYL*2~Dnqy!l;l<>scFvQN;(+OxP!T! zsfH>r4i((iZRFo|CwQ(pfd2MZd>k=6k`16=nlK#O$M#36+aK{b@;2 z4ldmrsufM)w5VOACJP{;xblacDifVhG0ip2>WyeRBWjL6OCW9jYS=ovcpl8gONctK zoK1?-&xh<#dfewx5v0-M~&t6UvymdC$5Y%LMlD=bk z=i!Q;rgYhVrDB#4P7YeMOOxVN%zz$#K@MtgI_>GE6}0ab&ZG6O;y`7#!BKm%wrjakxIThtVNg zcX-YjO)t}H7_jkThnZvE3$^;fr)U7y2GIX}{hR+iINLq=JtBvHyBL%I= zs>*Sg+IA?(d|$(UiP7gQHc`px7(B>lO$+} zuu5Si6IO?AwXKzQt255qw%A#3)Tly7%QT`cHZMCLLSkgN%U?8bU#6E+bf~ByA5vIv z5XZ}r^_E+MZCH4?yp2yh77sT=*7D+zBTdG7qZ=%cxFFs#vlh<9Wdnul3%n;<8`)Dm zEUzM!y0u3rK_+?{+XUmTN5de5LrWD$)=(qsy}uICC-{r9bi8K7UBr+m z#l3~5DoL&(UW_7OG@+=&-Y#295e&*Fka8r#9B3}^pt_k21Nl$_kIr2Q(pr6R zKTc8qZWKr0-Z`6iW5=)0)yU7#DPue6(}2U6RK*?DE)vdXD-q06Meb6$ukeL_;*&-s zcNq#f-g(iVQ)VP1&B7eT+_wGK-8^x1O+>`2ov<%%Oa8z-yB?iT22<2v1R(y^zOe;{p)TYb3;bue-8B9Q&4%0BQNiLnA-xY(`Z}v?7Aj$@| zM-OiBzt|ic+r1dk?WDeYeftOGZ?xh?cyFQ+raE{ z+@{iuLwM#)o3#VX*h-*Xr1(jfCmNi|hW0%=SCGu892HZi@UPYDg9#K1suMr|X01fO z-L16mkX75sf`KMDVgUFL`x%(3IQQqJHoD~DLWx3r5@tTybjL}q&BnvK3q1E+o?eX8 
zBRTHb{6uFel1?qjcA3~;nq4Xl!~0?m4Eor66Lsdx6s82d`hDJbLaHTY>tnK`E@})SE=Q*>H|RAin{5huWS!RZ$%RJG4Y)bfV0s!eWn}2ct{};1~$iUs*lGnAfuw^ z(CogZ#h-gYn3y1dHxf?NTdJu(bl+?_GSjJl>ZAydx2*}l}FIHsJCITTftyGy7Sv#x zzt&?LNXZqL*Ch@SKKwGEdU=+%N>JoS+86KSIA^T&I;1K|vRqa;}tf)GR@e|VgsQ< z-0(RIbgGH%$zNk5Xm)#adYqiFLE8amWdXLFX&dJ>*&z`;lj4F+fZilZjdIkYMSqUR z&kclzYjPQTdc2DWN??Sr44k(HTGI;eLbKLe_HzgZP{KRPnGsnC=!M%|D-1J7)vwof zNK=tvU}|6kd>_i+H*uZFfT?eq=Qzfu*wHTHoiq@E(lH&54>uKau?W0}uf$nmJcmXo zV*IDwUh0?icr3FTsK@c0m&)7eY2ybAZvDG11o5KpK=(8&1o~YXXp}QwWuWE zB%nMlm5~f;2*KsbY@UFt7({QYcU;PaN3eOYtSS)prJprZBOwL_fKN22;Db;4Pn%Mb zO4Cx`aTp7i@NkN=HXOB4^|?Upes2$sU(m<}jMl3Q10YHiwBspB^kw^G@wx8Fj76yi z+?Pu(sjSCte0J32u7K-nG?R2gKG)eZ1?-?rBTEk@MKhi>Tw@Xtgh?6Dl^p&der~#G{NhHIAdUA0c)f5Rtno_-T8C zGb8Ym!L@(lBsvE$e);^?oXk4)He5gl5{}>u8cX>QzJE05g=#XLuX%y7q;fQTzTtf6 z0swpXSlE!M=11DbW}|eW+R<4$xl#yvm3<8$Yb?z5qKX6Bx-a^hQdsm>k!)bd))jw& zkeBC659$GAav*^+Z-m+8?#;xmHsLiUF(d;F!UIqx=PrKmP054ss zi-d`3)0+r3#Vo3zTo!mtW)yA?vl4Rc{N9CYvnjR-O;NtQ{rUEfwfW}V_oKPYd-rgg z>g1z`E&A&MBh{*-rP6j(ZyZ!P0 z&8~Yl#59TBIu$oek1k+xJQ+6iVla2|>`^aXD#B|nI>p8^^%^0A=4ijPH#%9u2pvY< z>AIQE|JytM&Dt0Kwu9c|&we*SYce~*3^kA(7&~I>slAqsYVte_V|+(uW}G~(TtT*9 ziWJ&2g*l+^I2O9F))4>o1Au<Ns?^WAG$nFBihZ{k+F7MPoFTbqpe&IFA& zWd2XfeBMvao&ohsy}{a{95>CnH&J4Pn9FW_@$k#LA$FeMn%iG)-|V)wS93RPPmho5_Kuvgi`E)Q zOcD-k4+;x|H0Ds&P(S9C%qr-3?G1XPr~mx~0fy#FF-&rxh6C6bE8Dc*og;T&2^|g~ z#Zu>WUaDE8?fY&q0pjI{jq%wdAklZV>=YUW9DhwsrgE_6F((u#P3(z`XzaY2w367Q zey@2y>^ELeU%cb8&FruHbT*E0;_H<~%){nTVzQxtXhQu&wPAGf$WuSqgSl0Q10s(P zqQ+d#H2YvM+v^l{uxp{Klw~}r-ctq(X9!uW;|}w6{KDNwB1*F=L-O!chXJKssurt| zA9T4EoFPaTrc2UQ=>cOepkz~>W2f-Sf?3CLmrTU#L~y%cw);Io`GE&WOI`irv{iJHUzsg z64zo!1R}iyNWn3dH$R>fQn{-mb^i>RQ+o*n$sv?4@7KlAWWCB6_HGG@>rIOdhBGqm zFph@ukrT2vnDl}7-=swHEx;6_gI=C=G_)`tPN>^_i75&k`Kt}bTalC82;D_bUi)WP z+`}Q-5?)h-fdHR&2;Yt4GS=3c=Go)|CwZV+u|xfQ^(e_7^43Ai%-tA&KWwZy(cPc7 z+Bq&4doa6Z-@xJZ_OE1UbZ=4staS^v?$aLtIU$5`K zH%a*B9simMOsd&nBxx%l^bf7&w%l|@L=4n4fq!UY=G|tSq_9i+G5GI~RnIsF z1B{g|8N&ba=JuC;U2RQn7io772b04wn*uE_Hu8`n;`W8s0a*^W8 
zzH*3Zp=wt5>7a+a=e(${=xO=UZotzOhA-9SAT%pkxqhc0xcp^5)CEWPZyxS{z5U+r zjiN^xGGjC!FN7HO;qateN$H4A|M|2+YcAw_V6qcAYYp2@G9C>%hc!1st|!mm?2CbZ zOZQJ|Ba6Y~)wZ#E!ZN#OC6uLu zdF(axyUDQ1K|^@^V&jM4^Nunt*@mj;rl7*4^~E5ekHl+X@5(i_Ydx7_>-aU90_0(1 z&~qI^_Bb@Z$uYC>5&;Z5Q@}f1LLxcFIwqaT?`pX8Mp9N`BCh8w(pJXUM$rlDEA$0` zw%<2Ct$8JgY4LgDMY9EI)FN#;XM~)$&k&@iMND=!YWn^tm!-OFikz2ECrMzUAB6%` z+?!XIglZ^>hCn*grhy#?B9+ff*Jh?6 z@T@DQGcO38L=DPN6LjrVI!02Xabr;<(1#Ry$IZppA$>jAUAF)aOYMdL0!i;VRdVen z7)awVhEkhg^z+usmTtT3f;P`LzJS7w&(XSO6(*No;9+~Y61m~kNjXSW9V`T zrB}2(K6ul?ekrLQ?v+lS0>~5z&(n;{om?bcH`h)lu0j_uM;d*{>k+jaIb?249h|8* z_Go2Ga|TVv6)!m9$_J}lW#PW+cA8Bpj3{ZgWe8xU6)PHq$>D0yBC`Tzkd*TNicCS9 z9hdQr9Gw68;Zn~=js{*29Y)V7LF4&x4Ehn6u+nE&Bq2SJbp#ygwH^REdSCAp0$T&? zoh@PnPE99SI+c=0L<=5xA?+CfCA%y?JUefs?J^CAYPbK)GnMh~;-^4I@Kl_6@zogw zbvwphY>(sYPvEc+-RRO`=7hX%d$h-!u(`^WhMDupId}LJeA#oQUTeedEpx`+ESl8$ zmiKxfYZxw+uKnO}9F(d~Ckt7s_vOpM0BXKB?1foO~2{KVA zN(Hef7X_na5RI})I7&zHC?5obgx*5mir*MWOP8r=HE4>K*^3( zQA5S9xg0A$uk%QOk`V$OBY^WwHrYVP=zxsBZ|=Wu?!VvfZc7MA>d@F9g-`wAG>suvo7@5^xtp#?;rNZ<)&G2^XsPh zb<_NMzrXi8)@239t~SlDo95Ts{p}J}tysn8+OaBXsMs}^^j>HBZo*$T;ja(-<8l+O zxcOej<5xzmxcg1aN(^;dd<9kWKR<2$6_Y9DirLNzF6V@P!U_GP6Iyu# zc~Cdvplw8Pz0o0W$Erpgw2d?<8%eu{I*8k`Dr%_MHJ9{WXZnsX=o(p2B~j1>Nl-L` zpht3`=7&-9!>IY;et++GjG7fx+;OaG)ci1Nez@J=E}>?{Dn`wYRZ&C5uDN6gM%V!K zA4NZmq8}di$K}Md;^xg}ezTe1-0$!Gj&)hVv8zq`W|O|T-QO;mZw1G$Hs2d6cFpDH zdqd~-rR(N0a;*^x0QZbfS(1yBk{WG(z;IIb`#`X#htH$|G z2gu(siQh4a-)9oPV-mk>4Z$F zgh*)IH55YN`fZ@!*7vvd{q1&tyQJF+j$LuFb57Ugx8=R-^2g@>V?zAnet%r<)(VbY zZM;7=-XFL7+a=?z;Mmp3_=AdFb4kbbqubE>$JF-6!~VFO&Q{#~F?;=E_WH-;{#K9s zSaI{+X!ve4e0RUU_d5pu6;#}DtZIaOH$uL<-QO-DWCh2rM#y(m?3znDt{>e-$af>; zyNCU8xd~U?d^bYA8zJ94?r-%Fvf}0srJ*>GhXR=(Ms;#gFiA$yC>w;MbQA{CP#nqw zfhZBhfjksQ5>X_|M4=!R#gbeQ%vF2|!`S7qmE;reVn-3%8!|3?%xWCmy#EP2_5+N0kAsrDS zC6OX65(71oBL$KG4H6+0kRcrsA|;R_Ez%$nP$3!70U>{2r7MQt+3Aj&>%71r{4@OX{Ayw+e=?i7=Gjm^9sJYHipuQi*MH{U_Uih*}j z>==|cHFX*Yv~WtbwWDQc98_Ut(6WNV&Roq{AY;YEPPqbufs7R#+|_6x*9D&0-<8eY{jZJ 
z{f<>pL&dJS+?+pd&L1E4$K~c+ar5Km{Bd*s__)8-Gv|t%e{9ZwY|ek&@9+JN&AEb# zJC0Q`=ZaNr&K;|whKgNtxjFx_IsfsnKQ1@+ikp9I&VOvqe?0DQ^~|~A=0_QoQ&^SP zn2lT6jo)~c*|?Qm`GsLQmSuT{X}OMV`Id1xhjn;=%k(RTxJ|#qDq!pMl%7%z+mtKD z^9nA#@~$^&;MLgM=^)LVE4Un_n-k`Dy1S1||H$-@oBomM9}zpHm79L2R6G4jPpN>2 zM~HZ&Zn9q+swVvv9Pe*P(a6x2ppl=Mo|&Dk;A&NjO9Dpb^*iUBu6}g?KC*v*+#i>t zdBqJBL-6Q@T&b1V6#LdjixVl<8SZwJA}XP z#whw`7X0Vir$PUn1?#`<_%o^Em(cgk!_8OkZ~g-?{r9H(&yn?N+cSULsQ+sXzn)w8 zUz_Q_Z*TtNo#Xt+bo=ME{15Q**LQnBv_@A#muRQ1{26J7s5-n{CU_>toEitl&)#KHZaex~@Af|ft+s}Hw7 z?Mn~0Khcz%-20Y$9u7xGhWl>En;mqb4_)?cg4;V9+&TZ<5A?W$f;&mLlZHEpxPyv2 z$+(k_I|;dyk~>Mcla@P)xr3TJ$hniAJ4gP|_|6DTcMx?4ReSGg1$5nwu*Zqz5xK0W zT*pU5vtk`r+G>P7BAyjCgRq@%+Ld;Ju$^zNv&vn2XXun_K3J1k=G*v zdqiT7h-`%o6&e?IMr^x?JO#Ha2}E=|k3@yXl_V%WuAD^ya@9;yiVRm3tw?bfQM<3s zdnLU{a(AQ{S?*k#k>=gLM{@UWjaO7|n7enQcg1?IwAF~cdpBBF*bHgzd<3kt3uNwm zBd@fNO>6;$cke11-$5LI#NE9cZFhS&ZiW5nzCRjC6;ecoN>KSBJ!Gfkl$%mhW=c$X zDJ|fJq?8jK5C3A$8}|`h1=>-(LU;{ zDtCqW_fcn5dH1e%s!~ay%-21J#2?@*=UT8SzZ*h+`u+Qv~V_Jm%@3wYvA*oC@4 z7v_RohzoGx4G}(yw!kLVf?7yJkdNX_n29n$hBWzd*2q$h?;r{piZ12%Zqh?`BI_M; zQ)6-ARVBDWB?LU*|%(VZ2A?I>-HP&V#WHbwAG0C z4R^7^X3(+oO}5f5kh1fQyV5>3u?6(}hPznN_zvRuBkDKY1>gHwVSl?1;=k!GC~dF1 z*p<|&G%ji^f#_{jk*Malk_2_fm9wZluIenMC~#HLiVAlTwJLE}(u*2*F2yKv=hCby zNAo){wxV*wyo25qXh3S;fw6bcy255^RYF%|7Zod&s~M5+z}P!TTw#lf#&;0MN5#Jgo$nrw`Uk}I0Qes!tOsQE07VZY=wX6-Ku!-(^8hgq(DDE& z4^Z*|ArIW-9#lMdI2;`rZjyP}{Cgp}Z#%BbZ}_nlYeIQG)@xT_EI-CajltObQe~~R zgVk+fZq-+e?P{b^S?7_ri`t1X{n)C+Sbl6*BXuPh(~qqj!7Cffp}2PTC`M|(X%m@5 z76Hb7(;hO1tRZ8_7BYn_AwvLTzi9?pfku!GWCB`%1|a^0zvu^JzwOJLzuiOFy@cIM z*gb;0*OlBW#(N06M~L^@>AfPnSAzEn@Lu`dE53WBcMoCr$nIXz-NW*|g4@qFagNyT zk=i|mxwp($AhP=rws+)rtl5gnb$@0;#wNS<(0oGMZe7xDq*EiMGGp7k8Ha+tA3vg`M#WyFh7t z)Ob?bg%s)>xR3zd16TEpOg#jZb*`bL>Lqrk)={gYSX~oNiaVbiSIRTqV&@FSN@WE^ zw^reUpMIMo zLJRw7Cg?W=Qr7O2XTk<-LI^hmu7qqu;7W($+Qzu2o!TF$C;8;A)RlP>Pu@v8SsMbk zNRrBkltx*JLPu7%NmjWkRb{F~m1me%oLg`}79ZCq!|WOo`A|7WsJj*(6H|AC$1c_P zK2`WW&mh5L*Ft=s!sCwYpTSS-U~ZB@ZSdHXhoVD)@JP&SBs>!It{T=s>vXsY>p}e> 
z{Qy0j2lIpS1M-9M1M%ekcmRIzP4+QLBmYnz$A|VqdXz`FunyIMS}g!+HIV$nv$9X} z=5s$Nb!Bd{yLA)NthTmb zpkeDRJ{ZCk7bV?WbL|6IY%x_?Cuv+AQrbs-1;nnv!#*JNH}*h)a)I&5V_;qtc3sY5Q0%O)2GyF8?agk49xSlE@A<}*%u z$3`x1cTcCoNrt)Vc+og_1vd5(JECJ(DiMBMrcuJxJ{Z!K8l_5%6Blc%Lklg}_z)>E zv8(#Z!miN5KANe9kl(s(AGMbDP1W>Gh!$MEt%PjXZz~yU-}|JX`O5RbP_E)A8KZUF zKJ+L#l+eA^8no;KV)jwre93Vo$CKnp@*~0AMcq1Oh{uNrbr*KKqao%$SgNcBqWygp zXf?lWf#(XZC0=Vh7kOL5(M*`Q&f7VJn%}NO9Okzx9T{dwyj$s~p6+65{x{J{e%w_-Q^EgwNAQOwZv~8>V*&Eo+rg?(+;1rgyDf?(>Xt znnV10Wt;iwU54r7r+0lQ&_MIks}$4u>0LFfqs7MICIihrk9i*J%s8{nW17b@k6|9W z%ujC_83%`f=#TtEeH+K39szRG%xwL%P3<+C?-;|EeO|#D7_2|!9@Fj=ts6RRzh^^%Y8JHL~o9$XhMi~ zmASIP8jGP=X|AAoYfU~F%GD+%W3=Af2SjfnSy?BEULC~jqrMWoD<;}UGePv7M6ayf zDbIur*n|-63Unnzy9Qk;|G>j#D5b0h4_BhAOVYu^#pvoFy5%Sz3NU!=iuw1UaDvCK znyH3L+7;p2)jCNBk4NG0C_G%9u2q96Y2g9QB07pASU5*+)QV+k4ard&q0t$cQ5lg! zSOiC*1P0&8lRAkLx{@YvN0`6i_QO6V#}yw|yyf_jg4^Hk_=$u2liP}KDQG!~t=O+4 zkXyW6X7P4;#SZRyI2;`r?z7FblGn(kUvmV(qo>9fBrPOYPsB!vN`cMhF%DMpq%mu957pt@s?4XBhG zhG0su-UiZ@BX-fy!p2a#iV?8VE|9tNjl9y4ZDI?gNw}nFd@N}kPVVSE@BL^Y&o?il;>kz zcZDjG4IgMj>8wPRD{HMPcZG`fQD0TLt4%{?olzxAK+D>l@=Vx(sFGb~N>r)5RyLMH z{on1q3;QS8b)8ioMZpjt*b4Vs;uuhHq(mqRW01!rA$`+y_rGuSzuQ9x!)NDx-*v_s zjO9m;F@fhPyK7aQQ+w~TYM))F&g$Qc#@>v_xZ11qR(UHtGlz@|G9J?r9naAm%aM#@ z6ninMwbJ-yl~$ouXO&r1R*{)aa%m>ZWA$YYtFH2AZJE0==gy2PhAL;uIj(rdvc)#{ zotbh6_Re~$oZQ^LW7qZ0?3f!X$-J=5dWU}Fz##P|;-+>J@JYFWkU)n88OwtX1_N`E z3fII1i=0Uo@=>e_i(io9LrR?n6d?cHDRJaYhLyREoSicoqt~p_ilFnxwKe7`0u{ck z`Hp7gZ|6puVN`n!xHIdqZ4&8=uhYnNjGaiXI!CbtA?dGWKo-re1W{g#-&nHVzi6jrn)M$m#3A7W}MpaA*f?*F1HMye?QCw?p7z{Ot* z-m!0c$F7aT`?7Jr252c2cWQv9-t?a7VN=ObI2{cK0e%xK{5_=F|-ed z#M~%~y0cLfbabN#nngB>LT07VjiNodls`9$_T(cmHi|+PgT*gM@g>Ga5inPEy-Z6n~AvH>lAiw(%Q;;-K5$))12KMS{=SL|3 zVrUo+%u+BSj_MZy$J7f5F|_v}ACA|IKc{jn5Gr&k-CP-s>owj)r^2@xY;z zKDzL~>Fw8080x{NMbO9B9+U*Hy^vWveC_SYB`SVe?8!$qd|HGo28&;i;!BE83m`3C zdjNL?2M1;e4iQHMhk#>(1B4hlBFI=`bTBNKi^RAlFj(YFvXGd9Lslm9$uH$Sr`66*(-1kT%j9()G@4iJKWcjiL8OxLo2!OdrlWQV_Mb0D(v+3tYSp0$% 
zA98+qfCA*NJAX8?_{yR=EQ96G?3ueVSKi86IrBrq4I@QF9@2<~!^go4nIE%bZp@5% zsm4qTn|VycbHbIDQ@PxDayH5i=v0EUZYpUxMV#i_RMKJ?JLl|F`k@YIckm3jfKH_! zYSF1IU*KXD%HE8}xZ11qR(UJD>aMa|)vf4Ob1S*>*H25t8_76Eu@|FSD~$;b%wiY0 zxRJ{*6`9%ezl;E_@>qSD!>X(NSzG3=%qcjyVyJSa;NXg9EL-gTo|`Fme$TC^%E_(F za|Fk8vtw?oB=dscc#h!Uz#zdP;-=sb@JVohkU&QS8OwtX1_N`E3fII1i=0Uo@=ta(2#Wj9#-wD}v4&*VdS)2sCxJ<~y2|znvRthEeS` z;Lfbaw&jAp`1(R{9b;b#uH)QP!dbx~3<;y)5S)S7Dmb`wPNND!0Cei&2|7oy1R<&5 z0J3OyC79pG>-T05JszkU#GJ$3GZJ!2#gPc8dcS1;=ytTF=>Pad=-g z?y~?brQ#+F(A1mWlLh-Lj>73^I0&${+JALKA6(eS2bgdv1y6W!vc|TKd77~`#=10| zbu~sG1AA+})fg>?v2)JQqcOYQhgN*Lsxih`K&aAZjkS(}JSDhTg|eOT7>{s#dnNWp z^g{GL>~+}Nu$N))LN%7)Al^vEF^Xf@( z5HySI3Wdx{p&LVcaw&iA4eiNCVr&nEEC!2Tkm5^>T_Rx4*e3$GBRDuPOK^xdDmVlj z6C9w$Z?PpfL`DS}7R;sMujJ?{awb_w%wJE0#c#yoLrlQ|So{bM0Cxlj2WANl5l01w zfMbFKgcv#^$XH@@Ff5ph#JDCfSmaEykeGr)Sp0$%A7Tm)paA*%Tui0EiwHv;6&!+3 z)Kn@sxHMx;C1^UDN-&o))>P6{ozKvW&Ug=eqc2%&Amx0A|do^glg>QLL&j zIYlW(5|U4JViTFTC^)!5q=+Cmo?B6|kVG7!AOi%)bH{ghM|bf3mwy<3nq#6P)iK#I z-I4G>S7>H|gA4^S7|2K9k%}&_T{mW zv)>*XPMW6~lO-B4Y;mv=ftCkb^Bv8;LUzu1hEeTX$(>n`Z6720HXvE#nmmF%v#SNy zhEk~jlAc!aWT@1NXpHF11{hOlynLP zl(heNp14q^8i_E6CEMoWlx~`b`Vvli0#19JCV`e={R9F&&OU*j4B018G|)bQoO}>IX~Q}76!b}p(|mgb zT4s*ZMv{M@Kyh{l&wvZK0VUoi5R`VGK(J7@Gj@5J07CtEIl2{kSuf|y`0~AMFV}qn zr-delp1aLT4owdshHRJHWwyMQ)pA-!%jaskbRs*X<>|Y#VQ5RyQ9$@@O|F9qV!~IYn^AWGCSJf-(X2Ftf2A2QT zbGcvU+s{2O>*YLYg@ls8vb|il*`2-$KB&}o@6g3-Fuc^PFmImr#(*9LCdi3eV$~R=u>erWJ*nJ689vDHTs+daMr7o zJp`Te?Gb1-+k1di8*UZM*!z#?87c0QZnQR_vygfNrR@(!qNMr9^90B5N25-LO2vuB zn2zJrin0Y|)s0#+mz*TSX}()a&Qfw#k}V`vIl^vs2hV_Gu>pG#Rdxhz{ZOiKE$RL% zhZ?8vCh$^tlX%m3i99r%X63hyH`fwve+wG9{ViBzA4a)1y_evd;v;Ay%1+Uj>O24C zMVYzrR!|K!rBM}l_5nG&PJ1$l=i^gNv*50GHBGZ3jx7I8SjN;ylB7g!2UF z0mgsy5BsIC8$>1nHPtMs_tr!l*ccGFMSNVoB)Rl42$bKL@>0N}b*CjwNH$Tvw(PkJ&`ic&Pj zG$p5A#EKiqIH$dctM4gG>>|y{fjCLppR+_s_k)(FjTC9dSilX~i>R2xO48{}&`>H( zK=Wx$PlifuipH4US9Ya^BE%450|JK0W|a$2S< z5C8Ez@097vBYjGbq2ydekMRgs&Z9fF)pA@%b%i{lE921|%aN>vqnKtzusT$gFnY0z 
zT->5o;X=&+@EH3Jj^L52cOI_=Z$IaFy830;I(n1mKPa<-jZjD&naA6mU#^f)GPX1{q6?4u%DDkr>wm28*0Y783K~4#*e9hcOOy^vV0ALjAcp(1i)OR$u*I|B4?6? z+4OTMEPg?X4>>=QKmqdCoj)2`d}YxbmcjC8_RL+GD{p13oGDefVWf!2LmIJg_&As$ z^J8|*jhQho)tG5PPGKUR6RxzJ%H_tBvr%?HrxKiXQ%TDy;xyl;k`}|*IcKL*i5<@F z;2CfMol5_fS#&DP7r0o3vNz)~uJ$UuRo)7(x~uF~bt}5n+)A$erSyn5l5vb;FGjUi z8vC^jv)Dy0ZsgKm%K(V^U+sfdd91$7VbxXstSxg_=G>Wa#ZcwU4H;KNDqC!GKbk4G zxF4;j%1IgbkzLnEvtw?oB=f?C_9Oa@1B29?h@09?z$fJfLINEYWGoLl7!1rsDqIs6 zEOI7U$VagzEPg?X4=HsTP=Ne%r^Jyr8CK>ta(2#Wj9#-wD}v4&*VdS)2sCxJ<~y2| zznvRthEeS`;Lfbaw&jAp`1(R{9b;b#uH)QP!dV$63<;wK6P$tAD!{mOPNND!0Cei& z2|7oy1R<&J04iOHH)};EvYffKp0J#9f^w;F!t+A%>6&GL~2Z zOUy-LToV{9awZT(X%6)fjyY?5+7$W3(8?&N)Mm#_W0@TJh2%1GUib7_k(2b%!xs*RQiuU9qF*b@q7K6nvNbx1cMiDS)Y!m_9(Kj5JrEWwV z)iwf-DI3t@x7dJ;EB@+@o?I&aN-jZzCB&37LKcI?FG%qr=C5Ud#g8BWAcltFz$^tL z;;4QRa7?{`5JQUu8B2@~h6Qtx7}o>_i=0Uo67$zGVet!6e2A%4fCA)i#+XWFiZH}c zjUxC&O{D^bOEcC~f~KRX1am24O(i`=&cuv0m5LHBC6*)si*Lr9N`EZ_%$QRNFKNZ9 z`jS(WVk9B?L?e}&fstPFWU2^_i=0Uo5>s#pi(io9LrlQ|6d?cHNg12@3`?4g zoJ|{z(JOJZB53NkHg}#PQ1)!icQi|&of~O}Q7wtwnf2KEi8f$-`AB;mV{^%MoXsX* zBo2)G9*KnUYb4;^w+MtRUzQ+anbH9PFc)cZO=PghnPg!${rm`vUy$NM&Myy8fc$mm zk46?>Su}@bu>6@lb64idTUje-erUL1q=?8v8nJNrIG7>xV|L7qnK3Wbm}y}%kBN9r zxYBYemm5#cM%e+KN^sUqB`v3j(|nssS`1_7oSjNP)Zy$7o&guosq{lFI+f)MT&zOb zoADS|dzIcQZ-rOgRd%bo72RrXC0G6u9K;*RI7YD-qgpGC2@cF+7rD5RO9cl2F>+}p z%VYIr4y&&6XKk6gGN<6+ilNGxf`cm}l`RCvTQjBLcxydXPHtu1A~@cf9dlzPnHL1d zTLcFO1_=%kHwA})Pl5x41Ue$fSRQmR7?_JxxF#-G=VkAg#3{DKr8QVI^B0Qu)m ziL;r{urjxivvWpc^qMtV5p>?Tw#Ga~psBMp-_fl6?c7K+jB2j|cV<1dEf@5~*B658 z82eIi9p|PJ&I%4;NEii&;0(l8!NH|-8dVSipi>u5&^d}F2uTG8kVUgA!2}1`1j|mr z0XWA}Q*Z#dBRDvql;99?S8xb8COANdp(BEfB}NCsg1JbHYXXBs&LoS9-@5A`&jVX# zEj|D7JOE+}4j_NrKl6u?6dVAqY_~XYQEF+%ZBeQ`}hD8E~VfJFHY9j)-g{rw#HbOhO@55=wo1S&9@q( z#V~fx8G1Bk*Za_lPgga@7z+qh`mC|mF%ZJwVin4E#$!Cf@$Hq^8_^5V`>@wxZ^K@O zy$jV?f`fP?8OJD&VUII@qZhl#S^t%P)o+aYABnL&6tWmBenE;aF?NZ7Ib)v);Ev$nz%0Qb;;7&da7=K37Qe-o;1C%V zWLPkliocShr^uOPAu)eF5f;A@iw`jc2LLez2Y@?*g9Eb!hlrzsL%=b?0YVHN5o9be 
zIv5trMPghN7%XxoSx8L5AuN7DiVrab2T*|geJ-X_!66KBRB#ABQB$el;L?mWm7wWp zD#2XJSW`()kux!4O{Ic^%QEI}pX=hAF{e_&0hlqT5?<1ZRrMvOD8)!Z@`+AtA`=${ z2RDcm5d_CuD=HR}h(i=)fZ%xR_zv&r4F2>9f4=bu9=Uqw@mlcqbB;G2-X|5nQ$b4x z2R;azdeeK%%~QUM9EH=-aL+g%ICRoSmlqxy_-PUK;L{@L<7*E}g4bTiEFQl0_T&;3 zKP~p;BO5*~LKcI?FG%qv#is?37Oy>kJA#7)vjm5Tqk==gF~I>s3>^_6enE;4F$D)ufc$eOWo+g%ENM1!Hf=OUuf)-cpsC~9+ZloDTwIp(9)?@1@+JN!pBkgsJ%_Y}yHk*8rI56&eBofB2k$`vKA`r5CS%QpZ zN(ThMT%^f0k-;Kol7-py^CK*NL5dGKzdS$z^4Fa|8d-d0(Hxe+@@MwUU70IyWv!g~ zq2Y#+A|elI#KPg@V1~?(*)ca}#=KNxriINsCgM5aO3SHSZag^~We0RB!C5zzw45SN z^KB|=F^rvab}Ic)hqF6)23$a=(hs%hRF*Gru?l5x#$#OVReGzu6<&2$*{$kUbgQ|Q zT=`3I5N{;o7{y+UYOORTI53M{$fcPqkJXnsth&mdwPo(goPvWZhAL+Y z4z7q)wh$ao&6I-Usr6Jjxs`d!uJ5VYF*jC{c|mYIMR0Inkl+w;Q*a3QBsf4wpd*5e zt-AyV*fMMBDL4R#DL8=qasSL8MpAG9xU${i zz(v9Fl)ctdwptwCmyP=@Kuf8($pSR>ruSsQ{)(e;IvNfFY^|QMvwF&o?LgNs!t+Zm7X2*ErdB4_ro_r+6_E5-Tu=oWjzQoui z0_Kc;B7i%Bg9Eb!hlrzsL%=b?0b2YP8<26uU%e$b>?bZrV6ey#^A`@Gpo7IPNbw=2 z-~bAcKkf()4$Kl9B900U0mlRf2r+a-kg>$*U|290iE&L}u*jKYAu$Dqu=oWjKExCp zKmqdixtK}?hcLuZ!6EoWO{Ic^OEcC~f~KRX1am24O(i`=&cuv0l?o0nB}UN3D!v(W zDis`n8FMP(C9POhUvi34j3gwV=)@*6aZzw^gGdoUa6Gl5Vj+n*L;=C^6v6S-@g3gL z9bCnc?n*qZNX_7$>o&NGZ^-%9Sx+Bu{# zx_*slkVTL@f<3dV1=ogBsQ|K_R`Fzv)QV_~=@m|rMu-gO6lwY-!)d;ersY7KHj$4|2~koP^KD*Foz}E=HirYnuq!lPJ84_IjzdV^Jp#Wwoj1r z2ur;EXXInP$5`^Ydz?x?&GOHA&(aLgatb)@ahe2LhV>H&_&ECndNO36K+!<^1ak60 z_@oWz)KkzWEl%_85onn?P8&)7eFDYV9Xtar;0AOS`UHZ~?h^V<#RP%I*}cc^Yq=>FtmLe zJvBKeskAE((zLrP51P$4=O&!<39|B_U0HIjJpA{83Vy6hFtLx*%7Y;fqr;$DUz)M4 zJeFu$3tP}Rou^r?uhdVapF&@qugp)CpCUgseoB0KpN1kI=K;ol^bh-yKim)XF(2{D zdR4umZWb)*W?=bWJ(v4szWv|&=TUk!Y3^ZqJ!$Q6st#I)<=+Pqnyx(!4MV2X#3pf1 zl31h9=K$xlO4&ouIo}?EmYL(Ukt!)=8(q~jnyp6MC*8QU0VSS#1ErmQ0}Z8u0~~)E z&Xb{1aiTG%<2bdVY(ZIdqt@iYNiv+~yS3yjC1)ksLQ<9YPpxS_>%E9)4XM-ZPOt+SJN!GYweY!S&?#<4pysa&XpeL z-X+c_$}vCIRRvyW10tf7p-w;l9^#Z{uFZy^DJl_a;`;Dn=w)q}$TlD3sfJ8e3D^ z+S4lgZtuAs>9+W`BGWeCR_ST0Z>#jU-KR0Tk#^Hh*GRYVr&YS${BvzhCndpBod{4( zBIG1JJ?Y6%DN3;yvA@2c)r(khBN^wk7jgAX=X@hQZHZT;{paTlE9vP=PP5*NxJv(0 
zsekb=IJn=7sG8%t7tz1yK;H$9KdtG>P^nE(($kxqB#n?9&MDILNruyWBTdVJIBg_J z`|ksVvpaYOT)+(|=@cj^Y5#p7(NL;TproxlXc!|UDwgS$M`DydNrQ7*rdJ+Qq~0qz zD-T*5sZ3WM)1}yLq-A>L;g4ZPFVmGr@|2!3U3thk%40mjmGkJ1ZM7WNQC%UA=*oCB z$8sbq;V7nA5v&dsztM|byAD{5&8;{_Tt9Kr+1#ds+c;n%H@&-H=v?OD| z2SHPBdXLxWk-yaCD4dRld&cp=p_4wkyztP#@2#i@E0ds)>k=plRwW^`c=(yMCzq)B zjkPBq+3Q4d3)F%irv}BO6#OPpHFc*n& zO<=IdnPed`e=QRhzaYhjm|7DkK>oRtG9+4tCCx_8rj5qvl{i`vG<95?J5LcPd$#5~ znkCTAjWolkmPGE%dTiZa88E&q=w8RzTyh;}v&l!51LHoZB4PZP3V8P+1tH7VFvwV@ zbU*;iMVed_87y)pS(r^fm%`!~r1+5YBMB5Bf8F_`k;PXQ&0!fVe`e3zmAUd(*22XrdISvQrmoFY#1 zZ7OLojGc3KDwWvb><*p*7tpD6Qz1H)1tGX54YHlT0 z{!)6x8_76Eu@|FSD~)cjWs7a@qnYxrA|9=$%E`^`BfG9gvtw?oB=f?C_7VNYfkEm`#7*rc;FEF#A%PAH zGL{D&3fYk0R_lEcS;<2lVN3UBWLG~#^^O`v?A!d zaczxxia=9mYrdmd`P;dXW*F681MbXvY+Ekqi?1&P*D>~`;5yDtC7hLE!jLd(Fu@s! ztpbco=QOGy1VE=Qo}hCSOAwO&S_Wj%>`Jg-%fKdBcK%uhILA^`Z2`EWwK$-Z(h_l3 zX9+l_vOtI-q=JkkmcSBokr>wm28)~t#8OsZW+slH1t~tn)KowL^2hx%e=zdbG5{xj zE8xJz?dBu73^I0&$nd}L4d$d2rh{n#VA zBfx}9DR{z*lQp(=%+rjmG1jHwtgA8l7}#6$t;T3EjGc3a9*x=cKD6S~RgE#m0z#EO zYpit)gfO^Rg|eOT7>{s#dnNWp^g{GL>~+}Nu$N))LN(T}WyBlFI7V>{dz|qbz1T(0 z`mg+}erpWv!yz#@ilXjp6a^jKD1v5@jiQiQDRiS~PcG%pjiNpINQ{l5ki}r}3sQWE zu~7uf8CzHYck~ShW~mzyN41TBBbb%NZ?ORxSNzo*J-Jl;m0W@bONc3Fge(S&Uy$NM zOdSJ2Ocet#BpQYTvlNVoqxwa_G4%pM3@s95EHOG57R*IrToV{9awb_w%wNld#V<(l zA*NOV3Xs3KU@Da*M@}-`P@BDrJrW`=e%cW25312oc1_P0xiS(2?TtceF8lhvQMCBpnU>4 z`5=7KhI8sE=#v(w`Su93%p9kUB>z5v;_MEd0T*xsItzUQL23611Pf(5!JZM*zoGTB1Ei3r3F2Te;PAd)W>|p zE9+JDin>{_q?>`|fAw7Mm-+T{k7d1_C#{fB5?HpE>*Uq+)dbdKId&5_jg`on%0jW3 zBz0CoE2WjxN^2#y&MB`a~CKRe-aOBP}U#GC!Q0H%`_Ar#bsD z{#q8!?%+<*_17{D%w{SKT(HGjD3?5B&KPXTv$;6c0nKXK^YO8!X^#Dh!RBbK9BiK; z=MlE#*?*!O^F79vJnkN+>VoEZlwM7mdzfBNT6>(TgO*|WwM=Nb_B8!kW}mY#spe_M zJ_sUgIHy(09)era;p9l1%p9jVS5nG0x~ge^Ewd5#NjF+mO~g}g^eot78A=5QIQ}%8 zCqt#;#NLaQc0!K67L-*t4Y_cV45#^SEjdfcSxL5#ROQojn9q7I;#or~b$=~0Trc9* z52Xq{l4gIKyqmt8z)Rsx;!Wcv^3ZVFR^{nDt|i+37Bn60Z^0t_FmAl3_Y!?o0S7sJIPk>gl_)&q=$t@vPG}9g=f3&4RnuUP+o2DQD?mwVLK!>2dB| 
z;(VeU^J86A;B`h`O`mm|?%vM?p`lGPrxWO63UeEJ(wodzNnbFQ; zoToSsah~Bk!g+%80OLRUhyBPO?t2~gHtuEIySP_zZ(=W^iV=wx>9+JX3gxz*#@3X! z_O!~r+k37@x-Gt~$h6J3ReIX$+bTV7_i4;-q}}w>HPUVTX_am_|6G@B1pwEjIuW3n zM7~LSdeW1jQj}sZVw#ecq*ImRoc1EFzNaj`=Qx~tc~;}8OZp@|eaUGf?L}Or`)ip7 zy@-lAt`}^ve14TC;P}&;o(z@R6pb;x$w|@($>E$LO`l{q%{S7t9Ej6KGSmKA7S8VA z8E^qNprlivprrk^Of-}#6ewvckA5w)&+?cQ^)%x$U3nli=d?_(Jf=u7U#3?c6Qy)Y z8bvykXTnrmqnh^DGVhe>$|HG7PnoVf7wi6 zm}W(=I#m2ty|Ig2+^XD|LF~ze&~nS6#A36dxN@6;tYU-V?n0UBzQRp~I|{cG?j`tf zj9r93jqw+_{`IYblx+ih2L9TX-2&qUtxq9dETJZLBjyE0}!3KCLXbBbI zgP^H5y$1_FRsF4HA1S|3exCd``C(E$C>>s0WW;=!4RT>7%mY~<2WG$`@acVaE5PUW zshNJB*ym;LoqG3oOg!_>GxCHypL5RRagc-_P$VrrqCi@DNRYJi5JASKoeq);=3?5p zCT(DmGs(iV`^3WH7o_;4-NyzBkiYKy(a2(`7A%D-p+c|@EQ6}RByi15^oMMm7tm<@7aCY)@tKn|Q~C)%g?*_~yd+oxuVnPDfG zr}bIg<}taA&bG{ z7o_--BIkg#$T~psUhs!>$tp3Slt@StsC;rK5+xyk>0v8(Xs8}}3Z)wMeb_de_)*g&}DaXat6!A*nP z9JdSZ75KYzb_xEyIhz{)YTE4#dl@z|>|)pruoYk*z;@Sz8R$J1nG@f0fjQ|t2bptT z-ymb>oDOCb%*8q9nmGfDoJkhWx$n8K_ys9GbMAW%6d-@y`J<7QSRq=6m0?wA5n6+l zpmWZZL&YU3F^Pyby%!6IkArUlbN(%J{;f4<$yssCIa_Bp&Kv@nIf_cM!|WwyQ^ z-gj^}7l4)y?#==*k~h8Q;A|?|5)(y724_RzR@mQE+y?uL3R_?|z-@oq{JQmR;|qA3 zf@AVIG-7aoqOt%0YR3`)H1($U7~IWm|Jw#c(UF3q7X0DF4Xr<$_^Sz9SvRt6W81{K zg>3`d_SMZRTTu^<7#!ep;@|*Z4+jT~)tAZ|9>Y7#|9o}9G%1Be?; zx0n9<@#5z`-fR5*Ij^<#D#ZXQ#1G!~%7)D_b#$X6WV1U<8 z`16fN@W|CWkJo~?pL4wN@IGS!PX#R*4)`Ew>P_!4=4;k=j>73^xMv&>96ITv%L@+; ze4|7?c&7w?e5*i7@Lma-#lu(2o?N2hifT_jvf%|2vKTCWL5eRa-ZMa2ylDXL>@GPl zYkw)?=nhlBu{|aTG4_x_#uB50VZmG^#x;S#B4?6?#N2QSi(io9L(E+#P=Ne%CuMB) zGAwB}ayD%=Mz6%tilC|E+T3}HK-sf3-_b0Ac5b8@MzthzXVzouCe48HWx?_~#^#di zIGatDa}JDK&PBqwoC|ojoP&^Ml@2nNDIE|1bCD+3LX$fbK= z0Al3QOqR#$%N$l+<_W0EDt&u49rC;ToV^8awb{G$4#@a_ys9G zq}(|J1;{^lN}TO#hLyREoSicoqt~p_ilFnxwKe7`0!^K*`Hp6@XXi$mVN`n!xHIdq zZMmQ?zP=D#$Jm#G>o_-+aCU1g3<=}zT5twp>js-k=QOGy1VE=Qo}hCSOAwOow1F&| zT?w|^2Ag2nx#0$!W2w3425@KB%>kwM-6HPpyagQFdxH>TZyjVTu{PjhE)wIKz+jOx zfmr)+Ff$WJ(1H{nV(!L)0_2bTXZ~R1o*cl5{SgN)Zq8n_KYGpnh{OA`aeoA8DHZoe 
zfTrH`p6OwK#8EgM4F>`CN3Yo*y=H&(n*Gsh_Q(MyTuQ+cUYxA4tz({MY>lxl4QE}A z(Z|5vnr}5mi(%}XGxTW8uJ@r8pRQ_*F%}T2^jTxAV<3dV#VVBTjK_F{?&ib$XtA1;YT|S4z-1dpOv-J~nbo&RI zMYe!KW~I=Lpgp;iKevPSyHC0*>hb z(Bik)fQ&2t>W!XUD*j3?L4YO1lmtQ+gT*gM@gb%%03fC|03e1s;J_?>AmXS(5O7Q* zfDl6i1Q|<=4u%DDkr>wm28*0Y77|l12#a5k;zLZ;02Cm9GsaXZ9fTo{st3U*YAO{F zT$-__5;Pr6C74SYYbxm}awcZ1sZ>mGDY4`PSbQ_)R4OR|Gv-vnOIoq2zT_087)eMz z(TPoD;-bXh29Y9y%6M%>#X=HsP-?uE0b=8|<2$^gJGhD?$uZ3V;+E=|?3nIIc%Uo1 zWgUSG1u__zi&VHKE?DGDvT(W*AyXj)EeM7vDd8jYpv`4;_)2b{ykJhqo`vf_Uu*BPcMn2|yj3uAD$EozwEdQMM zEX@Edr-0KQr%9k?SU-V)kF!spCqwoL6b-abASWM$Pug%!Jq3Nz;xykLftH!$w2|cB zCs3T-!870jZa`H zXnF`SWV_rhv*op{meVp?K3CJF6WJj-Pv4ylL)*8}QvcJH6E}^O$ePMRv6&=wRzfSKmDEaWCAQEC+)ZwY+D~wC zvN9&QJem4L7id+0vyLMzDR44BoSZjK)&r+G`!N1u5zg-5PSN$Z3k}R>Dhyn(#abwr zJY>!oY{|2^IMo5oYTEPhv8HK`{ffcnXssM{e*_ZB1_$jEk4QlG?yR*+p zySMSI(>5KFb2ZI^yVhPwniVN$>0q^*=3MDK9_hCDwj$Fu-&W~qt8c6H zxZS5QyODO&PuEDd@uyX~-TZT1vK0Vam+C}-Y7+S->FG&NhDuS2y@>tw1uaRZD#bbN zMO=MPS$fZLIQ8(pDb* zie;bWF)8Y4#$~$lKx)ounO=EJkz&3~uRJD7>6Em;aE0$CJf){h zR~~YX@))n4|sbT9Pr~gP^H5y~k_xjK9?7D4dRld&cp=p_4wkyztP# z@2#i@E0ds)>k=plRwW^`c=(yMCzq)BjkPBq+3Q4d3)F%irv}BO6#OPpHFc*n&O<=IdnPed`e=QRhzaYhjm|7DkK>oRtG9+4t zCCx_8rj5qvl{i`vG<95?J5LcPd$#5~nkCTAjWolkmPGE%dTiZa88E&q=w8RzTyh;} zv&l!51LHoZB4PZP3V8P+1tH7VFvwV@bU*;iMVed_87y)pS(r^fm%`!~r1+5YBMB5B zf8F_`k;PXQ&0!fVe`e3zmAUd(*21K4aJQ%*GCgP@c6Yxp7fsjCl1sThO4h92rkqXzu1&f?X7V`1eGGXxxQhZ3M z(|`ixpF1UvyveXKw~@1RMq~7vHChpL-nh2LJVl_Xvo+t*to-fVNHdITuK{;vJ+>_u z^u^Z~g6kOjQg9vTrV`G|Fkwg-HJIQG#MWQSxO7gV3PJ#M>f#AHN3jGU>91u#7R{~% z(_CN^EIWTK1Ds>2`D+<~J6ekaN+~T7cXgJ4BbXJ$)L25s5=&r-xk!v_0)zQv09)N{+(mXgCP4m3+pY>=`?9fXK$3O^!i&ZGw8ISP@$G2BvZ$vLd@55e) zy$yRA_AXRo{aQx6k&I&$$FRp4ztM|b+aA20Y5ph)82snaSS^O3o zka5Ldz0s3P#b3$MlMlp{GeQ=F#V<(lA?B}T0L1*Y48R=?!+}`}M#NG5BH);M0U?GK z2{M)#9SjTRA~CKB3>G<)EF|WyWy0bYr1%h1s{jSa-{)c~l_|myM>UGz6E&6oTE?Xr zYbrt0(Nu!Dl(D9go+4*r#+ph+36~NhXk!)Mj5(Fc5x|T&mGF{Qtg0_LMJYxSl23GE z6PdUuKe$1ph@d^5Sy8c&L>!`k>Uf6ec;@&H@8}Hv@Pt3#cm$7Jz4Lf2c>6iW8xQZ3 
z3gD@rC4&PW1Wmo^J?7?t?;=OxbTr&Ejt35%^wH&ohX#IHL_PSl2>STigOcF27cz^7 zuf09FM8!{wJ^9FnPm7SnVDSr5d`aydu5O7RzfDl7R1Q|<= z4u%DDkr>wm28*0Y77|l%2#a5k;zLZq0Tdwr+({Xm`3y^%jhsyzjnOM{v?6HgxHfm5 zB2e~h&380Qpq(3OhEXkv+?n;*`iVAReECRw9bWe%&Z@@H+CyE3QX z;EJKjnSz5WB9$!!$Ag(ta6DK~m6KbU2L#81*)ca(l6gUJJRmqYFi3ESxG6XUd=eZW zB+wB-#`2(p!N6Rk!ZmThB4?6?d=wnQ;uoa&kWz2}1;{^lN}SDnhLyREoSicoqt~p_ zilFnxwKe7`0!^K*`Hp7gZ|6puVN`n!xHIdqZMmQ?zP=D#$Jm#G>o_-+a8_^#L&7LH z1ZN<&3Jxxv)2Ms3>^_ul2xIi^Kb}ai0ZfDHS(afTrH`o-Ej3aTHER!$E+p)dM@L2X=f9 zZ1^77#|M~jDFsh>ak9p?j(M7~HO9I$oOLxu9|L=9zSS5lhOu+b(4#TC-iKCvx~eh8 zSU{-KXN|Rvfe;24t5CKx9^(;?Z?DAOh+c@^hrJGa8}>5nU8u$q9K;*RI7V>{dz|qb zz1T(0`mg+}erpWD!67mCgQD*22n8M86M|-uU7?U!DRg6KPcG%py`eq%NQ~{Fki}r} z3sQWEu}cKZ8T&*4cLWCqW(f`vM+Jv~V}b*;_${^shsdZP!-Ba~{FNL%Mb0D(iTUe^ zu=tHwe26JH0Ej6#0NfEA9GE3IL>v_y0*(m|5MtPrIh6_yz>GPS@RC-nsxLW3DMk{KPjq4vnYbu8xIv_dAUGbZs8~oM4pEQ+ zg5$yQ9p2Fy{L>TueB%*3a`n#RwczdN9B(|lPbz??f|d*pd=NDCruUeePka|S3a6vt zo^d>I=%kM>FFZ8x(<17@r$x}m*B+Dvuf332Jbdl#$t5a&TI|V3HhfxyEC!2Tkm5^< zPYWO|UV8v{1P2FZ2@VlQ1&4rRf&+vYIwHteVstPpn2W@?CNNm!OtO%efGW!wn-vL>|(Jg~P|e44EIZ zV{Xihd8x)s3!8aN#B;)xmQ%Uhcycz%4(L>Zvu-MBIYpf2+f>qG7(3_eRQjO~XLs-n zxPVTjA8OI5EMMSa70TX>$GF<7^j3K*yy~v9Th*=TR&y)4@|WNs-bltVioF=sT4_vh zU>3W`#f@AlH~@%|OEXy>t1oj{b(KGB%iNVY1qW9QRn8O~ToI{kAvivnDFw$T>#1^b zEAxq6-zT$UZmcBpg5das;NZX@!6D+N;1KXhaDb3NM+6zmgAN7*bCC+y#086-Nfz=^ za0rWEkm5s1!2uK?|J*5YHuD))<~DM6&S;EYvqmd|&KuX(n5PIdb++a^nw7tu8)=47 z?KR-etjD(Hg1-3rLU0{pUka|{+*HC@!66I@qu>ynf!HcIxO7gV3PJ#M>f#AHN3jGU zso(&zXm%x--~gLo*(o>x=U8eA4ghxq2M3fA93t)t4gtpm2M95AM3Aw>=wMhd7m0CA zV6e!UWKr>3cL@%#W!BPDZ~zcfZ~*z^{+U0Fq~HK>WxK_Ji-O}5d#z7wwK%*l8~0g& zmQrz(1!(F`@5zGw6-VK8G#mujT76|LnF5*)-E$v8%F411jM8@<>?&ib$XtA1+?!NDOh_k*JD><9%N z-4lXlkzJvXSt)d5XiqNX&%L2N`ACfIp^(L3@e5LXiLpxr%o+Pc0Cxlj2WANl5l01w zfMbFKwD>JHAmfU^dP{KFPh60|V38r_FC0Qa2a8{j;zLZq0TdvA+z}icm?bzw92Fb_ zjtLGBV(5qz)b&`BR%UU+EWr$y9*Pm7?BuRSOU zUV9<4c=+1elS@?mwAho6Z1}VYSqv7xAjOvypB6w`y!HU@2o4U+5*#9q3Jw9s1P2H) 
zbVQJ`#OPpHFc*n&O<=IdnPed`1&6Ts1t~tn6dXVS^3R==v6;`Xq}j;Xw9y#75=Se7 zrjBcK=P3eZ&(?fLvjp0?k!BdxlE|G|kFB3*1ICw+wAV2ktWwf28*0Y7G~4WkFfX!DL&-<@&E2aWv;xHwQ}Z%h8sqTh&-eb3x|(`88Sa+$K041^HPnO7B=&ki06bWEvIt1@#Jij z9nh%+XWdlNa*8<3x2dGXFm}$_sq{k~&hFqDZ~>i4Kh&aAS-!x8#v8MDQM zmboi)3J$Iqs+=h}xFS;7LU4RAQwoj`)>Gx=R^|h{z7J-{+*nEG1;OzF!NGw+fTJz-G%J5QH_{BF+H1g_S&wbY1%2`Lh2T2Iz7$-?xv7M+ zfz6@&oj)Ws8Yj$#QyQo#Xa(dH~t~1A^lN`}hD8E~VfJFHY9j)-g{rw#HbOhO@55=wo1S&9@q(#V~fx8G1Bk*Za_l zPgga@7z+qh`mC|mF%ZJwVin4E#$!Cf@$Hq^8_^5V`>@wxZ^K@Oy$jV?f`fP?8OJD& zVUII@qZhl#S^t%P)o+aYABnL& z6tWmBenE;aF?NZ7Ib)v);Ev$nz%0Qb;;7&da7=K37Qe*?WL)uAZ}j9+@mF#Q4lE(2 z;1IGHEPg?X4>1J?05N~z03e3o;J_@wA>ydu5O7RzfDl7R1Q|<=4u%DDkr>wm28*0Y z77|l%2#a5k;zLZq0TdvAGsaXZID{dN3J$?1YAO{RT$-__5;Pr6C74SYYbxm}awcZ1 zsZ?-qDX|0xSbQ_)R4OP_!4H!t}v zauiNS!#(48;Lu4QU0!%-;HO2@gHMZ~kFPx_30`|4vv~O0+mlOF{IuAUk8Jp~2w4mk zzaYhz6rUDATDrmF?2+bvBc2pqu>x0zaYhjl!5~&K>oQ?;%w$KtjulX?3~dUy=IM81f4gotuap#XzFaucQh-1 zJ2%n{quOi0omr1<%LRS$^@ZR%#=aC>$GNG5vw}ky5=OxxI0La&aB%6IMiqnr=+wm% zbdF*PLQ=s2WYO$OFu?&f!Ln0u0M4=06dVBV2o4S?B{)Rf6&wPN2@Viq=!hU=iP6EZ zU@j8ln!sR@Gs&Xjx9$=gV9Ttfr{Dk}rr-eb$Ne*Z7)ika;L3K30~ZCyOZHkX*=lik zUpDTu04=5BCJWHio8FTJ`zwyZ>1a3zu(f*0&gvyQzL#wHUb2r5FyT@Pp77#ijcpzC zG-GRwb!j;3YK%Sx_SSr>Fi zBOKpeiMs3>^_6enE;4F$D)ufc$+frc%Kn z3~^L&2tH9$so>z!j5U>@>1Zm!T*_EeNl%e8F=I`of`iL4=5C+s;+rw2Qo#Y3F{ct< z(u!5}C8sFGNJ8?7PHZ9*7X=45h!hb7$4e_J7LtfV6l8$lcY>=eIw&e)i;7$4|d^{r&!*J$caRF9xFi z?8&c%M*1`GL62X7%!%-8kf;0Ep6+Kn*^ie-1kA`~NF^`FCPk$?KdIr}N|`12PK`uPiP|D4;P&H%T6&h4)Z zae#rpdh(@qmGRXJ^8JWL%<|X2 z(P36CyaOiGDBlOuW5IkM9PTH-`5y-Oi3gE??LnS@^4&pxe{zl=RZ~B5`hP^l{>a+< z(MyW&M=$GpY101SrIq-nC_5S!^B=tIP5t>xG6~T0;{qQ1Vzz$#t=_*NUqAkg*{^nY ze>CSh{>dw&{_5TzLgtTNnqw|})xLUZtUr8J_}}NoKg?t3WaRtfC*NS(W!}FEXq>S<<)gP`Evb>>w12@ydJJM*IWMo)AjFNk1wys!ISIBbv?bl$>fi( ze_5bU*B8unkk;_s^{&>3>%(jwuTPKHN27eY{`fsH9=P-7`s@F{zig`hI_&@YpYk5z zPc!gWoxnd0^sjjF-T41%?%#R;fA#$Rr}{G-|Mh?RA@_f@-2544e_`Jbe|Y`Q?_9rg z{m=aWzjOV&Ke+z4>wo7_E~PVVPJj3BUVrcJak_x|vG~7*Jc;@3>$k5TUf18b{>$t4 
zuD?N8LKv~*x30f={Vf*M-!azpyVvzkuYdFUx32%{&kN=w)c^gA|7*PdE~Rn$8`r;a z{Z`!n=Jjv)h(6YD^I$)`{`U3nU;n}NA71~_^&emV$@QOJ|Jn7QU;hQ^_&YrG@ABBc z$MgQz*MD>U==%HDe|!D@^$)HeUq8A2;q{NMpI-mn^|R|AUq8S8$@Sk~UtNE2{SVjw zc>Ut~!|UtokI4K#z5dzt&zYrv!B^<%^^A|ui|dzsj9y-^uGi;__3nDlhvk3qQ^S|$ z^YzEqx7Yv52kKuk0e`|Y{Hi{F=v>12Z(ZNMC*UiLU*P=Z$@S&)_2vEb=`)Y?tNw>4 z*Tbvp;q&$I;(B<0JwCY}UtN!%ug4eHBgRjzPp@?O^y2#T{(AT1diUyj_xXDF;(GV~ zdimsf`RaQ4`Fi={dinl(`@~Q2+s}QIzc=E?C)dYU*T>J-#~0Vf_t&=sDQ`UDH~e{| z`R?`g?j;ZY{5enf=9}xy+w0BK>&@f!=F|1&!}akcpX{gCQ~vP9z8)Vh67%{sxq2WQ zAIQ{q)99J6`HP;H(~I{!{dYIN(E4IBpztj>-+%Acd-C)x_rHB{ef!E(edqLoti2+Q r-?cse?H^q~|EkyEPp+?j_xi=}cK1)HlO4asY=7VWFS_3R@do^#rdNyu literal 0 HcmV?d00001